814309143101431114312143131431414315143161431714318143191432014321143221432314324143251432614327143281432914330143311433214333143341433514336143371433814339143401434114342143431434414345143461434714348143491435014351143521435314354143551435614357143581435914360143611436214363143641436514366143671436814369143701437114372143731437414375143761437714378143791438014381143821438314384143851438614387143881438914390143911439214393143941439514396143971439814399144001440114402144031440414405144061440714408144091441014411144121441314414144151441614417144181441914420144211442214423144241442514426144271442814429144301443114432144331443414435144361443714438144391444014441144421444314444144451444614447144481444914450144511445214453144541445514456144571445814459144601446114462144631446414465144661446714468144691447014471144721447314474144751447614477144781447914480144811448214483144841448514486144871448814489144901449114492144931449414495144961449714498144991450014501145021450314504145051450614507145081450914510145111451214513145141451514516145171451814519145201452114522145231452414525145261452714528145291453014531145321453314534145351453614537145381453914540145411454214543145441454514546145471454814549145501455114552145531455414555145561455714558145591456014561145621456314564145651456614567145681456914570145711457214573145741457514576145771457814579145801458114582145831458414585145861458714588145891459014591145921459314594145951459614597145981459914600146011460214603146041460514606146071460814609146101461114612146131461414615146161461714618146191462014621146221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501
915020150211502215023150241502515026150271502815029150301503115032150331503415035150361503715038150391504015041150421504315044150451504615047150481504915050150511505215053150541505515056150571505815059150601506115062150631506415065150661506715068150691507015071150721507315074150751507615077150781507915080150811508215083150841508515086150871508815089150901509115092150931509415095150961509715098150991510015101151021510315104151051510615107151081510915110151111511215113151141511515116151171511815119151201512115122151231512415125151261512715128151291513015131151321513315134151351513615137151381513915140151411514215143151441514515146151471514815149151501515115152151531515415155151561515715158151591516015161151621516315164151651516615167151681516915170151711517215173151741517515176151771517815179151801518115182151831518415185151861518715188151891519015191151921519315194151951519615197151981519915200152011520215203152041520515206152071520815209152101521115212152131521415215152161521715218152191522015221152221522315224152251522615227152281522915230152311523215233152341523515236152371523815239152401524115242152431524415245152461524715248152491525015251152521525315254152551525615257152581525915260152611526215263152641526515266152671526815269152701527115272152731527415275152761527715278152791528015281152821528315284152851528615287152881528915290152911529215293152941529515296152971529815299153001530115302153031530415305153061530715308153091531015311153121531315314153151531615317153181531915320153211532215323153241532515326153271532815329153301533115332153331533415335153361533715338153391534015341153421534315344153451534615347153481534915350153511535215353153541535515356153571535815359153601536115362153631536415365153661536715368153691537015371153721537315374153751537615377153781537915380153811538215383153841538515386153871538815389153901539115392153931539415395153961539715398153991540015401154021540315404154051540615407154081540915410154111541215413154141541515416154171541815419154201542115422154231542415425154261542715428154291543015431154321543315434154351543615437154381543915440154411544215443154441544515446154471544815449154501545115452154531545415455154561545715458154591546015461154621546315464154651546615467154681546915470154711547215473154741547515476154771547815479154801548115482154831548415485154861548715488154891549015491154921549315494154951549615497154981549915500155011550215503155041550515506155071550815509155101551115512155131551415515155161551715518155191552015521155221552315524155251552615527155281552915530155311553215533155341553515536155371553815539155401554115542155431554415545155461554715548155491555015551155521555315554155551555615557155581555915560155611556215563155641556515566155671556815569155701557115572155731557415575155761557715578155791558015581155821558315584155851558615587155881558915590155911559215593155941559515596155971559815599156001560115602156031560415605156061560715608156091561015611156121561315614156151561615617156181561915620156211562215623156241562515626156271562815629156301563115632156331563415635156361563715638156391564015641156421564315644156451564615647156481564915650156511565215653156541565515656156571565815659156601566115662156631566415665156661566715668156691567015671156721567315674156751567615677156781567915680156811568215683156841568515686156871568815689156901569115692156931569415695156961569715698156991570015701157021570315704157051570615707157081570915710157111571215713157141571515716157171571815719157201572115722157231572415725157261572715728157291573
015731157321573315734157351573615737157381573915740157411574215743157441574515746157471574815749157501575115752157531575415755157561575715758157591576015761157621576315764157651576615767157681576915770157711577215773157741577515776157771577815779157801578115782157831578415785157861578715788157891579015791157921579315794157951579615797157981579915800158011580215803158041580515806158071580815809158101581115812158131581415815158161581715818158191582015821158221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211622216223162241622516226162271622816229162301623116232162331623416235162361623716238162391624016241162421624316244162451624616247162481624916250162511625216253162541625516256162571625816259162601626116262162631626416265162661626716268162691627016271162721627316274162751627616277162781627916280162811628216283162841628516286162871628816289162901629116292162931629416295162961629716298162991630016301163021630316304163051630616307163081630916310163111631216313163141631516316163171631816319163201632116322163231632416325163261632716328163291633016331163321633316334163351633616337163381633916340163411634216343163441634516346163471634816349163501635116352163531635416355163561635716358163591636016361163621636316364163651636616367163681636916370163711637216373163741637516376163771637816379163801638116382163831638416385163861638716388163891639016391163921639316394163951639616397163981639916400164011640216403164041640516406164071640816409164101641116412164131641416415164161641716418164191642016421164221642316424164251642616427164281642916430164311643216433164341643516436164371643816439164401644
116442164431644416445164461644716448164491645016451164521645316454164551645616457164581645916460164611646216463164641646516466164671646816469164701647116472164731647416475164761647716478164791648016481164821648316484164851648616487164881648916490164911649216493164941649516496164971649816499165001650116502165031650416505165061650716508165091651016511165121651316514165151651616517165181651916520165211652216523165241652516526165271652816529165301653116532165331653416535165361653716538165391654016541165421654316544165451654616547165481654916550165511655216553165541655516556165571655816559165601656116562165631656416565165661656716568165691657016571165721657316574165751657616577165781657916580165811658216583165841658516586165871658816589165901659116592165931659416595165961659716598165991660016601166021660316604166051660616607166081660916610166111661216613166141661516616166171661816619166201662116622166231662416625166261662716628166291663016631166321663316634166351663616637166381663916640166411664216643166441664516646166471664816649166501665116652166531665416655166561665716658166591666016661166621666316664166651666616667166681666916670166711667216673166741667516676166771667816679166801668116682166831668416685166861668716688166891669016691166921669316694166951669616697166981669916700167011670216703167041670516706167071670816709167101671116712167131671416715167161671716718167191672016721167221672316724167251672616727167281672916730167311673216733167341673516736167371673816739167401674116742167431674416745167461674716748167491675016751167521675316754167551675616757167581675916760167611676216763167641676516766167671676816769167701677116772167731677416775167761677716778167791678016781167821678316784167851678616787167881678916790167911679216793167941679516796167971679816799168001680116802168031680416805168061680716808168091681016811168121681316814168151681616817168181681916820168211682216823168241682516826168271682816829168301683116832168331683416835168361683716838168391684016841168421684316844168451684616847168481684916850168511685216853168541685516856168571685816859168601686116862168631686416865168661686716868168691687016871168721687316874168751687616877168781687916880168811688216883168841688516886168871688816889168901689116892168931689416895168961689716898168991690016901169021690316904169051690616907169081690916910169111691216913169141691516916169171691816919169201692116922169231692416925169261692716928169291693016931169321693316934169351693616937169381693916940169411694216943169441694516946169471694816949169501695116952169531695416955169561695716958169591696016961169621696316964169651696616967169681696916970169711697216973169741697516976169771697816979169801698116982169831698416985169861698716988169891699016991169921699316994169951699616997169981699917000170011700217003170041700517006170071700817009170101701117012170131701417015170161701717018170191702017021170221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715
217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211742217423174241742517426174271742817429174301743117432174331743417435174361743717438174391744017441174421744317444174451744617447174481744917450174511745217453174541745517456174571745817459174601746117462174631746417465174661746717468174691747017471174721747317474174751747617477174781747917480174811748217483174841748517486174871748817489174901749117492174931749417495174961749717498174991750017501175021750317504175051750617507175081750917510175111751217513175141751517516175171751817519175201752117522175231752417525175261752717528175291753017531175321753317534175351753617537175381753917540175411754217543175441754517546175471754817549175501755117552175531755417555175561755717558175591756017561175621756317564175651756617567175681756917570175711757217573175741757517576175771757817579175801758117582175831758417585175861758717588175891759017591175921759317594175951759617597175981759917600176011760217603176041760517606176071760817609176101761117612176131761417615176161761717618176191762017621176221762317624176251762617627176281762917630176311763217633176341763517636176371763817639176401764117642176431764417645176461764717648176491765017651176521765317654176551765617657176581765917660176611766217663176641766517666176671766817669176701767117672176731767417675176761767717678176791768017681176821768317684176851768617687176881768917690176911769217693176941769517696176971769817699177001770117702177031770417705177061770717708177091771017711177121771317714177151771617717177181771917720177211772217723177241772517726177271772817729177301773117732177331773417735177361773717738177391774017741177421774317744177451774617747177481774917750177511775217753177541775517756177571775817759177601776117762177631776417765177661776717768177691777017771177721777317774177751777617777177781777917780177811778217783177841778517786177871778817789177901779117792177931779417795177961779717798177991780017801178021780317804178051780617807178081780917810178111781217813178141781517816178171781817819178201782117822178231782417825178261782717828178291783017831178321783317834178351783617837178381783917840178411784217843178441784517846178471784817849178501785117852178531785417855178561785717858178591786017861178621786
3178641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211822218223182241822518226182271822818229182301823118232182331823418235182361823718238182391824018241182421824318244182451824618247182481824918250182511825218253182541825518256182571825818259182601826118262182631826418265182661826718268182691827018271182721827318274182751827618277182781827918280182811828218283182841828518286182871828818289182901829118292182931829418295182961829718298182991830018301183021830318304183051830618307183081830918310183111831218313183141831518316183171831818319183201832118322183231832418325183261832718328183291833018331183321833318334183351833618337183381833918340183411834218343183441834518346183471834818349183501835118352183531835418355183561835718358183591836018361183621836318364183651836618367183681836918370183711837218373183741837518376183771837818379183801838118382183831838418385183861838718388183891839018391183921839318394183951839618397183981839918400184011840218403184041840518406184071840818409184101841118412184131841418415184161841718418184191842018421184221842318424184251842618427184281842918430184311843218433184341843518436184371843818439184401844118442184431844418445184461844718448184491845018451184521845318454184551845618457184581845918460184611846218463184641846518466184671846818469184701847118472184731847418475184761847718478184791848018481184821848318484184851848618487184881848918490184911849218493184941849518496184971849818499185001850118502185031850418505185061850718508185091851018511185121851318514185151851618517185181851918520185211852218523185241852518526185271852818529185301853118532185331853418535185361853718538185391854018541185421854318544185451854618547185481854918550185511855218553 |
- <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
- <!-- NewPage -->
- <html lang="en">
- <head>
- <!-- Generated by javadoc (1.8.0_312) on Wed Jun 28 12:47:25 UTC 2023 -->
- <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
- <title>Calib3d (OpenCV 4.8.0 Java documentation)</title>
- <meta name="date" content="2023-06-28">
- <link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
- <script type="text/javascript" src="../../../script.js"></script>
- </head>
- <body>
- <script type="text/javascript"><!--
- try {
- if (location.href.indexOf('is-external=true') == -1) {
- parent.document.title="Calib3d (OpenCV 4.8.0 Java documentation)";
- }
- }
- catch(err) {
- }
- //-->
- var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":9,"i52":9,"i53":9,"i54":9,"i55":9,"i56":9,"i57":9,"i58":9,"i59":9,"i60":9,"i61":9,"i62":9,"i63":9,"i64":9,"i65":9,"i66":9,"i67":9,"i68":9,"i69":9,"i70":9,"i71":9,"i72":9,"i73":9,"i74":9,"i75":9,"i76":9,"i77":9,"i78":9,"i79":9,"i80":9,"i81":9,"i82":9,"i83":9,"i84":9,"i85":9,"i86":9,"i87":9,"i88":9,"i89":9,"i90":9,"i91":9,"i92":9,"i93":9,"i94":9,"i95":9,"i96":9,"i97":9,"i98":9,"i99":9,"i100":9,"i101":9,"i102":9,"i103":9,"i104":9,"i105":9,"i106":9,"i107":9,"i108":9,"i109":9,"i110":9,"i111":9,"i112":9,"i113":9,"i114":9,"i115":9,"i116":9,"i117":9,"i118":9,"i119":9,"i120":9,"i121":9,"i122":9,"i123":9,"i124":9,"i125":9,"i126":9,"i127":9,"i128":9,"i129":9,"i130":9,"i131":9,"i132":9,"i133":9,"i134":9,"i135":9,"i136":9,"i137":9,"i138":9,"i139":9,"i140":9,"i141":9,"i142":9,"i143":9,"i144":9,"i145":9,"i146":9,"i147":9,"i148":9,"i149":9,"i150":9,"i151":9,"i152":9,"i153":9,"i154":9,"i155":9,"i156":9,"i157":9,"i158":9,"i159":9,"i160":9,"i161":9,"i162":9,"i163":9,"i164":9,"i165":9,"i166":9,"i167":9,"i168":9,"i169":9,"i170":9,"i171":9,"i172":9,"i173":9,"i174":9,"i175":9,"i176":9,"i177":9,"i178":9,"i179":9,"i180":9,"i181":9,"i182":9,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":9,"i189":9,"i190":9,"i191":9,"i192":9,"i193":9,"i194":9,"i195":9,"i196":9,"i197":9,"i198":9,"i199":9,"i200":9,"i201":9,"i202":9,"i203":9,"i204":9,"i205":9,"i206":9,"i207":9,"i208":9,"i209":9,"i210":9,"i211":9,"i212":9,"i213":9,"i214":9,"i215":9,"i216":9,"i217":9,"i218":9,"i219":9,"i220":9,"i221":9,"i222":9,"i223":9,"i224":9,"i225":9,"i226":9,"i227":9,"i228":9,"i229":9,"i230":9,"i231":9,"i232":9,"i233":9,"i234":9,"i235":9,"i236":9,"i237":9,"i238":9};
- var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]};
- var altColor = "altColor";
- var rowColor = "rowColor";
- var tableTab = "tableTab";
- var activeTableTab = "activeTableTab";
- </script>
- <noscript>
- <div>JavaScript is disabled on your browser.</div>
- </noscript>
- <!-- ========= START OF TOP NAVBAR ======= -->
- <div class="topNav"><a name="navbar.top">
- <!-- -->
- </a>
- <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
- <a name="navbar.top.firstrow">
- <!-- -->
- </a>
- <ul class="navList" title="Navigation">
- <li><a href="../../../overview-summary.html">Overview</a></li>
- <li><a href="package-summary.html">Package</a></li>
- <li class="navBarCell1Rev">Class</li>
- <li><a href="package-tree.html">Tree</a></li>
- <li><a href="../../../index-all.html">Index</a></li>
- <li><a href="../../../help-doc.html">Help</a></li>
- </ul>
- <div class="aboutLanguage">
- <script>
- var url = window.location.href;
- var pos = url.lastIndexOf('/javadoc/');
- url = pos >= 0 ? (url.substring(0, pos) + '/javadoc/mymath.js') : (window.location.origin + '/mymath.js');
- var script = document.createElement('script');
- script.src = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML,' + url;
- document.getElementsByTagName('head')[0].appendChild(script);
- </script>
- </div>
- </div>
- <div class="subNav">
- <ul class="navList">
- <li>Prev Class</li>
- <li><a href="../../../org/opencv/calib3d/StereoBM.html" title="class in org.opencv.calib3d"><span class="typeNameLink">Next Class</span></a></li>
- </ul>
- <ul class="navList">
- <li><a href="../../../index.html?org/opencv/calib3d/Calib3d.html" target="_top">Frames</a></li>
- <li><a href="Calib3d.html" target="_top">No Frames</a></li>
- </ul>
- <ul class="navList" id="allclasses_navbar_top">
- <li><a href="../../../allclasses-noframe.html">All Classes</a></li>
- </ul>
- <div>
- <script type="text/javascript"><!--
- allClassesLink = document.getElementById("allclasses_navbar_top");
- if(window==top) {
- allClassesLink.style.display = "block";
- }
- else {
- allClassesLink.style.display = "none";
- }
- //-->
- </script>
- </div>
- <div>
- <ul class="subNavList">
- <li>Summary: </li>
- <li>Nested | </li>
- <li><a href="#field.summary">Field</a> | </li>
- <li><a href="#constructor.summary">Constr</a> | </li>
- <li><a href="#method.summary">Method</a></li>
- </ul>
- <ul class="subNavList">
- <li>Detail: </li>
- <li><a href="#field.detail">Field</a> | </li>
- <li><a href="#constructor.detail">Constr</a> | </li>
- <li><a href="#method.detail">Method</a></li>
- </ul>
- </div>
- <a name="skip.navbar.top">
- <!-- -->
- </a></div>
- <!-- ========= END OF TOP NAVBAR ========= -->
- <!-- ======== START OF CLASS DATA ======== -->
- <div class="header">
- <div class="subTitle">org.opencv.calib3d</div>
- <h2 title="Class Calib3d" class="title">Class Calib3d</h2>
- </div>
- <div class="contentContainer">
- <ul class="inheritance">
- <li>java.lang.Object</li>
- <li>
- <ul class="inheritance">
- <li>org.opencv.calib3d.Calib3d</li>
- </ul>
- </li>
- </ul>
- <div class="description">
- <ul class="blockList">
- <li class="blockList">
- <hr>
- <br>
- <pre>public class <span class="typeNameLabel">Calib3d</span>
- extends java.lang.Object</pre>
- </li>
- </ul>
- </div>
- <div class="summary">
- <ul class="blockList">
- <li class="blockList">
- <!-- =========== FIELD SUMMARY =========== -->
- <ul class="blockList">
- <li class="blockList"><a name="field.summary">
- <!-- -->
- </a>
- <h3>Field Summary</h3>
- <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Field Summary table, listing fields, and an explanation">
- <caption><span>Fields</span><span class="tabEnd"> </span></caption>
- <tr>
- <th class="colFirst" scope="col">Modifier and Type</th>
- <th class="colLast" scope="col">Field and Description</th>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_ACCURACY">CALIB_CB_ACCURACY</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_ADAPTIVE_THRESH">CALIB_CB_ADAPTIVE_THRESH</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_ASYMMETRIC_GRID">CALIB_CB_ASYMMETRIC_GRID</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_CLUSTERING">CALIB_CB_CLUSTERING</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_EXHAUSTIVE">CALIB_CB_EXHAUSTIVE</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_FAST_CHECK">CALIB_CB_FAST_CHECK</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_FILTER_QUADS">CALIB_CB_FILTER_QUADS</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_LARGER">CALIB_CB_LARGER</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_MARKER">CALIB_CB_MARKER</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_NORMALIZE_IMAGE">CALIB_CB_NORMALIZE_IMAGE</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_CB_SYMMETRIC_GRID">CALIB_CB_SYMMETRIC_GRID</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_ASPECT_RATIO">CALIB_FIX_ASPECT_RATIO</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_FOCAL_LENGTH">CALIB_FIX_FOCAL_LENGTH</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_INTRINSIC">CALIB_FIX_INTRINSIC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_K1">CALIB_FIX_K1</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_K2">CALIB_FIX_K2</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_K3">CALIB_FIX_K3</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_K4">CALIB_FIX_K4</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_K5">CALIB_FIX_K5</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_K6">CALIB_FIX_K6</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_PRINCIPAL_POINT">CALIB_FIX_PRINCIPAL_POINT</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_S1_S2_S3_S4">CALIB_FIX_S1_S2_S3_S4</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_TANGENT_DIST">CALIB_FIX_TANGENT_DIST</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_FIX_TAUX_TAUY">CALIB_FIX_TAUX_TAUY</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_HAND_EYE_ANDREFF">CALIB_HAND_EYE_ANDREFF</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_HAND_EYE_DANIILIDIS">CALIB_HAND_EYE_DANIILIDIS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_HAND_EYE_HORAUD">CALIB_HAND_EYE_HORAUD</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_HAND_EYE_PARK">CALIB_HAND_EYE_PARK</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_HAND_EYE_TSAI">CALIB_HAND_EYE_TSAI</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_NINTRINSIC">CALIB_NINTRINSIC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_RATIONAL_MODEL">CALIB_RATIONAL_MODEL</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_ROBOT_WORLD_HAND_EYE_LI">CALIB_ROBOT_WORLD_HAND_EYE_LI</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_ROBOT_WORLD_HAND_EYE_SHAH">CALIB_ROBOT_WORLD_HAND_EYE_SHAH</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_SAME_FOCAL_LENGTH">CALIB_SAME_FOCAL_LENGTH</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_THIN_PRISM_MODEL">CALIB_THIN_PRISM_MODEL</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_TILTED_MODEL">CALIB_TILTED_MODEL</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_USE_EXTRINSIC_GUESS">CALIB_USE_EXTRINSIC_GUESS</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_USE_INTRINSIC_GUESS">CALIB_USE_INTRINSIC_GUESS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_USE_LU">CALIB_USE_LU</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_USE_QR">CALIB_USE_QR</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_ZERO_DISPARITY">CALIB_ZERO_DISPARITY</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CALIB_ZERO_TANGENT_DIST">CALIB_ZERO_TANGENT_DIST</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CirclesGridFinderParameters_ASYMMETRIC_GRID">CirclesGridFinderParameters_ASYMMETRIC_GRID</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CirclesGridFinderParameters_SYMMETRIC_GRID">CirclesGridFinderParameters_SYMMETRIC_GRID</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#COV_POLISHER">COV_POLISHER</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CV_DLS">CV_DLS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CV_EPNP">CV_EPNP</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CV_ITERATIVE">CV_ITERATIVE</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CV_P3P">CV_P3P</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CvLevMarq_CALC_J">CvLevMarq_CALC_J</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CvLevMarq_CHECK_ERR">CvLevMarq_CHECK_ERR</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CvLevMarq_DONE">CvLevMarq_DONE</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#CvLevMarq_STARTED">CvLevMarq_STARTED</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_CHECK_COND">fisheye_CALIB_CHECK_COND</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_FOCAL_LENGTH">fisheye_CALIB_FIX_FOCAL_LENGTH</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_INTRINSIC">fisheye_CALIB_FIX_INTRINSIC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_K1">fisheye_CALIB_FIX_K1</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_K2">fisheye_CALIB_FIX_K2</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_K3">fisheye_CALIB_FIX_K3</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_K4">fisheye_CALIB_FIX_K4</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_PRINCIPAL_POINT">fisheye_CALIB_FIX_PRINCIPAL_POINT</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_FIX_SKEW">fisheye_CALIB_FIX_SKEW</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_RECOMPUTE_EXTRINSIC">fisheye_CALIB_RECOMPUTE_EXTRINSIC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_USE_INTRINSIC_GUESS">fisheye_CALIB_USE_INTRINSIC_GUESS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_CALIB_ZERO_DISPARITY">fisheye_CALIB_ZERO_DISPARITY</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#FM_7POINT">FM_7POINT</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#FM_8POINT">FM_8POINT</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#FM_LMEDS">FM_LMEDS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#FM_RANSAC">FM_RANSAC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LMEDS">LMEDS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LOCAL_OPTIM_GC">LOCAL_OPTIM_GC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LOCAL_OPTIM_INNER_AND_ITER_LO">LOCAL_OPTIM_INNER_AND_ITER_LO</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LOCAL_OPTIM_INNER_LO">LOCAL_OPTIM_INNER_LO</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LOCAL_OPTIM_NULL">LOCAL_OPTIM_NULL</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LOCAL_OPTIM_SIGMA">LOCAL_OPTIM_SIGMA</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#LSQ_POLISHER">LSQ_POLISHER</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#MAGSAC">MAGSAC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#NEIGH_FLANN_KNN">NEIGH_FLANN_KNN</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#NEIGH_FLANN_RADIUS">NEIGH_FLANN_RADIUS</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#NEIGH_GRID">NEIGH_GRID</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#NONE_POLISHER">NONE_POLISHER</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#PROJ_SPHERICAL_EQRECT">PROJ_SPHERICAL_EQRECT</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#PROJ_SPHERICAL_ORTHO">PROJ_SPHERICAL_ORTHO</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#RANSAC">RANSAC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#RHO">RHO</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SAMPLING_NAPSAC">SAMPLING_NAPSAC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SAMPLING_PROGRESSIVE_NAPSAC">SAMPLING_PROGRESSIVE_NAPSAC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SAMPLING_PROSAC">SAMPLING_PROSAC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SAMPLING_UNIFORM">SAMPLING_UNIFORM</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SCORE_METHOD_LMEDS">SCORE_METHOD_LMEDS</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SCORE_METHOD_MAGSAC">SCORE_METHOD_MAGSAC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SCORE_METHOD_MSAC">SCORE_METHOD_MSAC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SCORE_METHOD_RANSAC">SCORE_METHOD_RANSAC</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_AP3P">SOLVEPNP_AP3P</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_DLS">SOLVEPNP_DLS</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_EPNP">SOLVEPNP_EPNP</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_IPPE">SOLVEPNP_IPPE</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_IPPE_SQUARE">SOLVEPNP_IPPE_SQUARE</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_ITERATIVE">SOLVEPNP_ITERATIVE</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_MAX_COUNT">SOLVEPNP_MAX_COUNT</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_P3P">SOLVEPNP_P3P</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_SQPNP">SOLVEPNP_SQPNP</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#SOLVEPNP_UPNP">SOLVEPNP_UPNP</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_ACCURATE">USAC_ACCURATE</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_DEFAULT">USAC_DEFAULT</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_FAST">USAC_FAST</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_FM_8PTS">USAC_FM_8PTS</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_MAGSAC">USAC_MAGSAC</a></span></code> </td>
- </tr>
- <tr class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_PARALLEL">USAC_PARALLEL</a></span></code> </td>
- </tr>
- <tr class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#USAC_PROSAC">USAC_PROSAC</a></span></code> </td>
- </tr>
- </table>
- </li>
- </ul>
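<p>The <code>SAMPLING_*</code>, <code>SCORE_METHOD_*</code> and <code>USAC_*</code> constants above are flag values for the USAC robust-estimation framework used by methods such as <code>findHomography</code>, while the <code>SOLVEPNP_*</code> constants select the pose-estimation algorithm for <code>solvePnP</code>. The following is a minimal sketch of passing one of these flags; it assumes the OpenCV Java bindings and native library are available, and the class name <code>UsacFlagExample</code> and the point correspondences are made up purely for illustration.</p>
<pre><code>
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;

public class UsacFlagExample {
    public static void main(String[] args) {
        // Load the OpenCV native library (name is resolved by the Java bindings).
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Eight made-up point correspondences, roughly a small translation.
        MatOfPoint2f src = new MatOfPoint2f(
                new Point(10, 10), new Point(200, 15), new Point(190, 180),
                new Point(12, 175), new Point(60, 40), new Point(150, 60),
                new Point(80, 140), new Point(170, 130));
        MatOfPoint2f dst = new MatOfPoint2f(
                new Point(13, 12), new Point(203, 17), new Point(193, 182),
                new Point(15, 177), new Point(63, 42), new Point(153, 62),
                new Point(83, 142), new Point(173, 132));

        // USAC_MAGSAC selects the USAC pipeline with MAGSAC++ scoring;
        // 3.0 is the reprojection-error threshold in pixels.
        Mat H = Calib3d.findHomography(src, dst, Calib3d.USAC_MAGSAC, 3.0);
        System.out.println("H = " + (H.empty() ? "(not found)" : H.dump()));
    }
}
</code></pre>
<p>Other presets such as <code>USAC_DEFAULT</code>, <code>USAC_ACCURATE</code> or <code>USAC_FAST</code> can be passed in the same position, trading estimation time against robustness.</p>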
- <!-- ======== CONSTRUCTOR SUMMARY ======== -->
- <ul class="blockList">
- <li class="blockList"><a name="constructor.summary">
- <!-- -->
- </a>
- <h3>Constructor Summary</h3>
- <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
- <caption><span>Constructors</span><span class="tabEnd"> </span></caption>
- <tr>
- <th class="colOne" scope="col">Constructor and Description</th>
- </tr>
- <tr class="altColor">
- <td class="colOne"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#Calib3d--">Calib3d</a></span>()</code> </td>
- </tr>
- </table>
- </li>
- </ul>
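<p>Although a public no-argument constructor is listed above, <code>Calib3d</code> is normally used without instantiation: every operation in the method summary below is a <code>static</code> method that writes its results into caller-supplied output <code>Mat</code> objects. A minimal sketch of this calling style with <code>composeRT</code> follows; the rotation and translation values are made up, and loading of the OpenCV native library is assumed.</p>
<pre><code>
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class ComposeRtExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // First transform: translation of 0.1 along X (Rodrigues rotation vector = 0).
        Mat rvec1 = Mat.zeros(3, 1, CvType.CV_64F);
        Mat tvec1 = Mat.zeros(3, 1, CvType.CV_64F);
        tvec1.put(0, 0, 0.1, 0.0, 0.0);

        // Second transform: 90-degree rotation about Z, no translation.
        Mat rvec2 = Mat.zeros(3, 1, CvType.CV_64F);
        rvec2.put(0, 0, 0.0, 0.0, Math.PI / 2);
        Mat tvec2 = Mat.zeros(3, 1, CvType.CV_64F);

        // Output arguments are filled in place, as with most Calib3d methods.
        Mat rvec3 = new Mat();
        Mat tvec3 = new Mat();
        Calib3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);

        System.out.println("rvec3 = " + rvec3.dump());
        System.out.println("tvec3 = " + tvec3.dump());
    }
}
</code></pre>
<p>The same in-place output pattern (empty <code>Mat</code> objects passed as trailing arguments) applies to most methods below, for example <code>decomposeProjectionMatrix</code> and <code>convertPointsToHomogeneous</code>.</p>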
- <!-- ========== METHOD SUMMARY =========== -->
- <ul class="blockList">
- <li class="blockList"><a name="method.summary">
- <!-- -->
- </a>
- <h3>Method Summary</h3>
- <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
- <caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t1" class="tableTab"><span><a href="javascript:show(1);">Static Methods</a></span><span class="tabEnd"> </span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd"> </span></span></caption>
- <tr>
- <th class="colFirst" scope="col">Modifier and Type</th>
- <th class="colLast" scope="col">Method and Description</th>
- </tr>
- <tr id="i0" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCamera-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">calibrateCamera</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</code> </td>
- </tr>
- <tr id="i1" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCamera-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">calibrateCamera</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</code> </td>
- </tr>
- <tr id="i2" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCamera-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-org.opencv.core.TermCriteria-">calibrateCamera</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code> </td>
- </tr>
- <tr id="i3" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraExtended-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">calibrateCameraExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</code>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- pattern.</div>
- </td>
- </tr>
- <tr id="i4" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraExtended-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">calibrateCameraExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</code>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- pattern.</div>
- </td>
- </tr>
- <tr id="i5" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraExtended-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">calibrateCameraExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- pattern.</div>
- </td>
- </tr>
- <tr id="i6" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraRO-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-">calibrateCameraRO</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints)</code> </td>
- </tr>
- <tr id="i7" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraRO-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-">calibrateCameraRO</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- int flags)</code> </td>
- </tr>
- <tr id="i8" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraRO-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">calibrateCameraRO</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code> </td>
- </tr>
- <tr id="i9" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraROExtended-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">calibrateCameraROExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</code>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.</div>
- </td>
- </tr>
- <tr id="i10" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraROExtended-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">calibrateCameraROExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</code>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.</div>
- </td>
- </tr>
- <tr id="i11" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateCameraROExtended-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">calibrateCameraROExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.</div>
- </td>
- </tr>
- <tr id="i12" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-">calibrateHandEye</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_target2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_target2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_cam2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_cam2gripper)</code>
- <div class="block">Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)</div>
- </td>
- </tr>
- <tr id="i13" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-int-">calibrateHandEye</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_target2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_target2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_cam2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_cam2gripper,
- int method)</code>
- <div class="block">Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)</div>
- </td>
- </tr>
- <tr id="i14" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateRobotWorldHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">calibrateRobotWorldHandEye</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_base2gripper,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_base2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_gripper2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_gripper2cam)</code>
- <div class="block">Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)</div>
- </td>
- </tr>
- <tr id="i15" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrateRobotWorldHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">calibrateRobotWorldHandEye</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_base2gripper,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_base2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_gripper2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_gripper2cam,
- int method)</code>
- <div class="block">Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)</div>
- </td>
- </tr>
- <tr id="i16" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#calibrationMatrixValues-org.opencv.core.Mat-org.opencv.core.Size-double-double-double:A-double:A-double:A-org.opencv.core.Point-double:A-">calibrationMatrixValues</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double apertureWidth,
- double apertureHeight,
- double[] fovx,
- double[] fovy,
- double[] focalLength,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> principalPoint,
- double[] aspectRatio)</code>
- <div class="block">Computes useful camera characteristics from the camera intrinsic matrix.</div>
- </td>
- </tr>
- <tr id="i17" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#checkChessboard-org.opencv.core.Mat-org.opencv.core.Size-">checkChessboard</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size)</code> </td>
- </tr>
- <tr id="i18" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i19" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i20" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i21" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i22" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i23" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i24" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt1)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i25" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr2)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i26" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">composeRT</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt2)</code>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- </td>
- </tr>
- <tr id="i27" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#computeCorrespondEpilines-org.opencv.core.Mat-int-org.opencv.core.Mat-org.opencv.core.Mat-">computeCorrespondEpilines</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points,
- int whichImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> lines)</code>
- <div class="block">For points in an image of a stereo pair, computes the corresponding epilines in the other image.</div>
- </td>
- </tr>
- <tr id="i28" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#convertPointsFromHomogeneous-org.opencv.core.Mat-org.opencv.core.Mat-">convertPointsFromHomogeneous</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</code>
- <div class="block">Converts points from homogeneous to Euclidean space.</div>
- </td>
- </tr>
- <tr id="i29" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#convertPointsToHomogeneous-org.opencv.core.Mat-org.opencv.core.Mat-">convertPointsToHomogeneous</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</code>
- <div class="block">Converts points from Euclidean to homogeneous space.</div>
- </td>
- </tr>
- <tr id="i30" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#correctMatches-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">correctMatches</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newPoints1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newPoints2)</code>
- <div class="block">Refines coordinates of corresponding points.</div>
- </td>
- </tr>
- <tr id="i31" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">decomposeEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</code>
- <div class="block">Decompose an essential matrix to possible rotations and translation.</div>
- </td>
- </tr>
- <tr id="i32" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeHomographyMat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-java.util.List-">decomposeHomographyMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rotations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> translations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> normals)</code>
- <div class="block">Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).</div>
- </td>
- </tr>
- <tr id="i33" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">decomposeProjectionMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect)</code>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- </td>
- </tr>
- <tr id="i34" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">decomposeProjectionMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX)</code>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- </td>
- </tr>
- <tr id="i35" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">decomposeProjectionMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixY)</code>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- </td>
- </tr>
- <tr id="i36" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">decomposeProjectionMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixY,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixZ)</code>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- </td>
- </tr>
- <tr id="i37" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">decomposeProjectionMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixY,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixZ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> eulerAngles)</code>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- </td>
- </tr>
- <tr id="i38" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#drawChessboardCorners-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.MatOfPoint2f-boolean-">drawChessboardCorners</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> corners,
- boolean patternWasFound)</code>
- <div class="block">Renders the detected chessboard corners.</div>
- </td>
- </tr>
- <tr id="i39" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#drawFrameAxes-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-float-">drawFrameAxes</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- float length)</code>
- <div class="block">Draw axes of the world/object coordinate system from pose estimation.</div>
- </td>
- </tr>
- <tr id="i40" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#drawFrameAxes-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-float-int-">drawFrameAxes</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- float length,
- int thickness)</code>
- <div class="block">Draw axes of the world/object coordinate system from pose estimation.</div>
- </td>
- </tr>
- <tr id="i41" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i42" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i43" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i44" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i45" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i46" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i47" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-long-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence,
- long refineIters)</code>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i48" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">estimateAffine2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pts1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pts2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</code> </td>
- </tr>
- <tr id="i49" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-">estimateAffine3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</code>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i50" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-double:A-">estimateAffine3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- double[] scale)</code>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i51" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-double:A-boolean-">estimateAffine3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- double[] scale,
- boolean force_rotation)</code>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i52" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">estimateAffine3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</code>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i53" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">estimateAffine3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold)</code>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i54" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-double-">estimateAffine3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold,
- double confidence)</code>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i55" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i56" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i57" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i58" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i59" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i60" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i61" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-long-">estimateAffinePartial2D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence,
- long refineIters)</code>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- </td>
- </tr>
- <tr id="i62" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-">estimateChessboardSharpness</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners)</code>
- <div class="block">Estimates the sharpness of a detected chessboard.</div>
- </td>
- </tr>
- <tr id="i63" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-float-">estimateChessboardSharpness</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- float rise_distance)</code>
- <div class="block">Estimates the sharpness of a detected chessboard.</div>
- </td>
- </tr>
- <tr id="i64" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-float-boolean-">estimateChessboardSharpness</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- float rise_distance,
- boolean vertical)</code>
- <div class="block">Estimates the sharpness of a detected chessboard.</div>
- </td>
- </tr>
- <tr id="i65" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-float-boolean-org.opencv.core.Mat-">estimateChessboardSharpness</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- float rise_distance,
- boolean vertical,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> sharpness)</code>
- <div class="block">Estimates the sharpness of a detected chessboard.</div>
- </td>
- </tr>
- <tr id="i66" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateTranslation3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">estimateTranslation3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</code>
- <div class="block">Computes an optimal translation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i67" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateTranslation3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">estimateTranslation3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold)</code>
- <div class="block">Computes an optimal translation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i68" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#estimateTranslation3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-double-">estimateTranslation3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold,
- double confidence)</code>
- <div class="block">Computes an optimal translation between two 3D point sets.</div>
- </td>
- </tr>
- <tr id="i69" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#filterHomographyDecompByVisibleRefpoints-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">filterHomographyDecompByVisibleRefpoints</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rotations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> normals,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> beforePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> afterPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> possibleSolutions)</code>
- <div class="block">Filters homography decompositions based on additional information.</div>
- </td>
- </tr>
- <tr id="i70" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#filterHomographyDecompByVisibleRefpoints-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">filterHomographyDecompByVisibleRefpoints</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rotations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> normals,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> beforePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> afterPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> possibleSolutions,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pointsMask)</code>
- <div class="block">Filters homography decompositions based on additional information.</div>
- </td>
- </tr>
- <tr id="i71" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#filterSpeckles-org.opencv.core.Mat-double-int-double-">filterSpeckles</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- double newVal,
- int maxSpeckleSize,
- double maxDiff)</code>
- <div class="block">Filters off small noise blobs (speckles) in the disparity map</div>
- </td>
- </tr>
- <tr id="i72" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#filterSpeckles-org.opencv.core.Mat-double-int-double-org.opencv.core.Mat-">filterSpeckles</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- double newVal,
- int maxSpeckleSize,
- double maxDiff,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> buf)</code>
- <div class="block">Filters off small noise blobs (speckles) in the disparity map</div>
- </td>
- </tr>
- <tr id="i73" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#find4QuadCornerSubpix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-">find4QuadCornerSubpix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> region_size)</code> </td>
- </tr>
- <tr id="i74" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findChessboardCorners-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.MatOfPoint2f-">findChessboardCorners</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> corners)</code>
- <div class="block">Finds the positions of internal corners of the chessboard.</div>
- </td>
- </tr>
- <tr id="i75" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findChessboardCorners-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.MatOfPoint2f-int-">findChessboardCorners</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> corners,
- int flags)</code>
- <div class="block">Finds the positions of internal corners of the chessboard.</div>
- </td>
- </tr>
- <tr id="i76" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findChessboardCornersSB-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-">findChessboardCornersSB</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners)</code> </td>
- </tr>
- <tr id="i77" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findChessboardCornersSB-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-int-">findChessboardCornersSB</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- int flags)</code> </td>
- </tr>
- <tr id="i78" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findChessboardCornersSBWithMeta-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-int-org.opencv.core.Mat-">findChessboardCornersSBWithMeta</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> meta)</code>
- <div class="block">Finds the positions of internal corners of the chessboard using a sector based approach.</div>
- </td>
- </tr>
- <tr id="i79" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findCirclesGrid-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-">findCirclesGrid</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> centers)</code> </td>
- </tr>
- <tr id="i80" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findCirclesGrid-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-int-">findCirclesGrid</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> centers,
- int flags)</code> </td>
- </tr>
- <tr id="i81" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2)</code> </td>
- </tr>
- <tr id="i82" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal)</code> </td>
- </tr>
- <tr id="i83" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp)</code> </td>
- </tr>
- <tr id="i84" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method)</code> </td>
- </tr>
- <tr id="i85" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob)</code> </td>
- </tr>
- <tr id="i86" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob,
- double threshold)</code> </td>
- </tr>
- <tr id="i87" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-double-int-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob,
- double threshold,
- int maxIters)</code> </td>
- </tr>
- <tr id="i88" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-double-int-org.opencv.core.Mat-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob,
- double threshold,
- int maxIters,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code> </td>
- </tr>
- <tr id="i89" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i90" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i91" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i92" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob,
- double threshold)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i93" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-int-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob,
- double threshold,
- int maxIters)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i94" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-int-org.opencv.core.Mat-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob,
- double threshold,
- int maxIters,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i95" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- </td>
- </tr>
- <tr id="i96" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- </td>
- </tr>
- <tr id="i97" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method,
- double prob)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- </td>
- </tr>
- <tr id="i98" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method,
- double prob,
- double threshold)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- </td>
- </tr>
- <tr id="i99" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-org.opencv.core.Mat-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method,
- double prob,
- double threshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- </td>
- </tr>
- <tr id="i100" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">findEssentialMat</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dist_coeff1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dist_coeff2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</code> </td>
- </tr>
- <tr id="i101" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2)</code> </td>
- </tr>
- <tr id="i102" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method)</code> </td>
- </tr>
- <tr id="i103" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold)</code> </td>
- </tr>
- <tr id="i104" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence)</code> </td>
- </tr>
- <tr id="i105" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-int-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence,
- int maxIters)</code>
- <div class="block">Calculates a fundamental matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i106" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-int-org.opencv.core.Mat-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence,
- int maxIters,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code>
- <div class="block">Calculates a fundamental matrix from the corresponding points in two images.</div>
- </td>
- </tr>
- <tr id="i107" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-org.opencv.core.Mat-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code> </td>
- </tr>
- <tr id="i108" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">findFundamentalMat</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</code> </td>
- </tr>
- <tr id="i109" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints)</code>
- <div class="block">Finds a perspective transformation between two planes.</div>
- </td>
- </tr>
- <tr id="i110" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method)</code>
- <div class="block">Finds a perspective transformation between two planes.</div>
- </td>
- </tr>
- <tr id="i111" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold)</code>
- <div class="block">Finds a perspective transformation between two planes.</div>
- </td>
- </tr>
- <tr id="i112" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-org.opencv.core.Mat-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code>
- <div class="block">Finds a perspective transformation between two planes.</div>
- </td>
- </tr>
- <tr id="i113" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-org.opencv.core.Mat-int-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- int maxIters)</code>
- <div class="block">Finds a perspective transformation between two planes.</div>
- </td>
- </tr>
- <tr id="i114" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-org.opencv.core.Mat-int-double-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- int maxIters,
- double confidence)</code>
- <div class="block">Finds a perspective transformation between two planes.</div>
- </td>
- </tr>
- <tr id="i115" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">findHomography</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</code> </td>
- </tr>
- <tr id="i116" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_calibrate-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">fisheye_calibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</code>
- <div class="block">Performs camera calibration</div>
- </td>
- </tr>
- <tr id="i117" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_calibrate-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">fisheye_calibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</code>
- <div class="block">Performs camera calibration</div>
- </td>
- </tr>
- <tr id="i118" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_calibrate-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-org.opencv.core.TermCriteria-">fisheye_calibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Performs camera calibration</div>
- </td>
- </tr>
- <tr id="i119" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_distortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_distortPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</code>
- <div class="block">Distorts 2D points using fisheye model.</div>
- </td>
- </tr>
- <tr id="i120" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_distortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">fisheye_distortPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- double alpha)</code>
- <div class="block">Distorts 2D points using fisheye model.</div>
- </td>
- </tr>
- <tr id="i121" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_estimateNewCameraMatrixForUndistortRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P)</code>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- </td>
- </tr>
- <tr id="i122" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-">fisheye_estimateNewCameraMatrixForUndistortRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- double balance)</code>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- </td>
- </tr>
- <tr id="i123" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Size-">fisheye_estimateNewCameraMatrixForUndistortRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- double balance,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> new_size)</code>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- </td>
- </tr>
- <tr id="i124" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Size-double-">fisheye_estimateNewCameraMatrixForUndistortRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- double balance,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> new_size,
- double fov_scale)</code>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- </td>
- </tr>
- <tr id="i125" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_initUndistortRectifyMap-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_initUndistortRectifyMap</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size,
- int m1type,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map2)</code>
- <div class="block">Computes undistortion and rectification maps for image transform by #remap.</div>
- </td>
- </tr>
- <tr id="i126" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_projectPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_projectPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</code> </td>
- </tr>
- <tr id="i127" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_projectPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">fisheye_projectPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- double alpha)</code> </td>
- </tr>
- <tr id="i128" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_projectPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Mat-">fisheye_projectPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- double alpha,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian)</code> </td>
- </tr>
- <tr id="i129" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T)</code> </td>
- </tr>
- <tr id="i130" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-int-">fisheye_stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- int flags)</code> </td>
- </tr>
- <tr id="i131" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">fisheye_stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code> </td>
- </tr>
- <tr id="i132" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">fisheye_stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</code>
- <div class="block">Performs stereo calibration</div>
- </td>
- </tr>
- <tr id="i133" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">fisheye_stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</code>
- <div class="block">Performs stereo calibration</div>
- </td>
- </tr>
- <tr id="i134" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-org.opencv.core.TermCriteria-">fisheye_stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Performs stereo calibration</div>
- </td>
- </tr>
- <tr id="i135" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">fisheye_stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags)</code>
- <div class="block">Stereo rectification for fisheye camera model</div>
- </td>
- </tr>
- <tr id="i136" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.Size-">fisheye_stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize)</code>
- <div class="block">Stereo rectification for fisheye camera model</div>
- </td>
- </tr>
- <tr id="i137" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.Size-double-">fisheye_stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- double balance)</code>
- <div class="block">Stereo rectification for fisheye camera model</div>
- </td>
- </tr>
- <tr id="i138" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.Size-double-double-">fisheye_stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- double balance,
- double fov_scale)</code>
- <div class="block">Stereo rectification for fisheye camera model</div>
- </td>
- </tr>
- <tr id="i139" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortImage-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_undistortImage</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</code>
- <div class="block">Transforms an image to compensate for fisheye lens distortion.</div>
- </td>
- </tr>
- <tr id="i140" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortImage-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_undistortImage</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Knew)</code>
- <div class="block">Transforms an image to compensate for fisheye lens distortion.</div>
- </td>
- </tr>
- <tr id="i141" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortImage-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-">fisheye_undistortImage</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Knew,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> new_size)</code>
- <div class="block">Transforms an image to compensate for fisheye lens distortion.</div>
- </td>
- </tr>
- <tr id="i142" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_undistortPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</code>
- <div class="block">Undistorts 2D points using fisheye model</div>
- </td>
- </tr>
- <tr id="i143" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_undistortPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R)</code>
- <div class="block">Undistorts 2D points using fisheye model</div>
- </td>
- </tr>
- <tr id="i144" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">fisheye_undistortPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P)</code>
- <div class="block">Undistorts 2D points using fisheye model</div>
- </td>
- </tr>
- <tr id="i145" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">fisheye_undistortPoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Undistorts 2D points using fisheye model</div>
- </td>
- </tr>
- <tr id="i146" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getDefaultNewCameraMatrix-org.opencv.core.Mat-">getDefaultNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix)</code>
- <div class="block">Returns the default new camera matrix.</div>
- </td>
- </tr>
- <tr id="i147" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getDefaultNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Size-">getDefaultNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgsize)</code>
- <div class="block">Returns the default new camera matrix.</div>
- </td>
- </tr>
- <tr id="i148" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getDefaultNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Size-boolean-">getDefaultNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgsize,
- boolean centerPrincipalPoint)</code>
- <div class="block">Returns the default new camera matrix.</div>
- </td>
- </tr>
- <tr id="i149" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-">getOptimalNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha)</code>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- </td>
- </tr>
- <tr id="i150" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-org.opencv.core.Size-">getOptimalNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize)</code>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- </td>
- </tr>
- <tr id="i151" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-org.opencv.core.Size-org.opencv.core.Rect-">getOptimalNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI)</code>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- </td>
- </tr>
- <tr id="i152" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-org.opencv.core.Size-org.opencv.core.Rect-boolean-">getOptimalNewCameraMatrix</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI,
- boolean centerPrincipalPoint)</code>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- </td>
- </tr>
- <tr id="i153" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#getValidDisparityROI-org.opencv.core.Rect-org.opencv.core.Rect-int-int-int-">getValidDisparityROI</a></span>(<a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi1,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi2,
- int minDisparity,
- int numberOfDisparities,
- int blockSize)</code> </td>
- </tr>
- <tr id="i154" class="altColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#initCameraMatrix2D-java.util.List-java.util.List-org.opencv.core.Size-">initCameraMatrix2D</a></span>(java.util.List<<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize)</code>
- <div class="block">Finds an initial camera intrinsic matrix from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i155" class="rowColor">
- <td class="colFirst"><code>static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a></code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#initCameraMatrix2D-java.util.List-java.util.List-org.opencv.core.Size-double-">initCameraMatrix2D</a></span>(java.util.List<<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double aspectRatio)</code>
- <div class="block">Finds an initial camera intrinsic matrix from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i156" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#initInverseRectificationMap-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-">initInverseRectificationMap</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newCameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size,
- int m1type,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map2)</code>
- <div class="block">Computes the projection and inverse-rectification transformation map.</div>
- </td>
- </tr>
- <tr id="i157" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#initUndistortRectifyMap-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-">initUndistortRectifyMap</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newCameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size,
- int m1type,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map2)</code>
- <div class="block">Computes the undistortion and rectification transformation map.</div>
- </td>
- </tr>
- <tr id="i158" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#matMulDeriv-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">matMulDeriv</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> A,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> B,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dABdA,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dABdB)</code>
- <div class="block">Computes partial derivatives of the matrix product for each multiplied matrix.</div>
- </td>
- </tr>
- <tr id="i159" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#projectPoints-org.opencv.core.MatOfPoint3f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.MatOfPoint2f-">projectPoints</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints)</code>
- <div class="block">Projects 3D points to an image plane.</div>
- </td>
- </tr>
- <tr id="i160" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#projectPoints-org.opencv.core.MatOfPoint3f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-">projectPoints</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian)</code>
- <div class="block">Projects 3D points to an image plane.</div>
- </td>
- </tr>
- <tr id="i161" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#projectPoints-org.opencv.core.MatOfPoint3f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-double-">projectPoints</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian,
- double aspectRatio)</code>
- <div class="block">Projects 3D points to an image plane.</div>
- </td>
- </tr>
- <tr id="i162" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</code> </td>
- </tr>
- <tr id="i163" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double focal)</code> </td>
- </tr>
- <tr id="i164" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp)</code> </td>
- </tr>
- <tr id="i165" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code> </td>
- </tr>
- <tr id="i166" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</code>
- <div class="block">Recovers the relative camera rotation and the translation from an estimated essential
- matrix and the corresponding points in two images, using chirality check.</div>
- </td>
- </tr>
- <tr id="i167" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double distanceThresh)</code> </td>
- </tr>
- <tr id="i168" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double distanceThresh,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code> </td>
- </tr>
- <tr id="i169" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Mat-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double distanceThresh,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> triangulatedPoints)</code> </td>
- </tr>
- <tr id="i170" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code>
- <div class="block">Recovers the relative camera rotation and the translation from an estimated essential
- matrix and the corresponding points in two images, using chirality check.</div>
- </td>
- </tr>
- <tr id="i171" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</code>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check.</div>
- </td>
- </tr>
- <tr id="i172" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method)</code>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check.</div>
- </td>
- </tr>
- <tr id="i173" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method,
- double prob)</code>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check.</div>
- </td>
- </tr>
- <tr id="i174" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method,
- double prob,
- double threshold)</code>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check.</div>
- </td>
- </tr>
- <tr id="i175" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-org.opencv.core.Mat-">recoverPose</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method,
- double prob,
- double threshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</code>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check.</div>
- </td>
- </tr>
- <tr id="i176" class="altColor">
- <td class="colFirst"><code>static float</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#rectify3Collinear-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Size-org.opencv.core.Rect-org.opencv.core.Rect-int-">rectify3Collinear</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs3,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imgpt1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imgpt3,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R12,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T12,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R13,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T13,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi1,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi2,
- int flags)</code> </td>
- </tr>
- <tr id="i177" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#reprojectImageTo3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">reprojectImageTo3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> _3dImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q)</code>
- <div class="block">Reprojects a disparity image to 3D space.</div>
- </td>
- </tr>
- <tr id="i178" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#reprojectImageTo3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-boolean-">reprojectImageTo3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> _3dImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- boolean handleMissingValues)</code>
- <div class="block">Reprojects a disparity image to 3D space.</div>
- </td>
- </tr>
- <tr id="i179" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#reprojectImageTo3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-">reprojectImageTo3D</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> _3dImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- boolean handleMissingValues,
- int ddepth)</code>
- <div class="block">Reprojects a disparity image to 3D space.</div>
- </td>
- </tr>
- <tr id="i180" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#Rodrigues-org.opencv.core.Mat-org.opencv.core.Mat-">Rodrigues</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</code>
- <div class="block">Converts a rotation matrix to a rotation vector or vice versa.</div>
- </td>
- </tr>
- <tr id="i181" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#Rodrigues-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">Rodrigues</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian)</code>
- <div class="block">Converts a rotation matrix to a rotation vector or vice versa.</div>
- </td>
- </tr>
- <tr id="i182" class="altColor">
- <td class="colFirst"><code>static double[]</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">RQDecomp3x3</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ)</code>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- </td>
- </tr>
- <tr id="i183" class="rowColor">
- <td class="colFirst"><code>static double[]</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">RQDecomp3x3</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qx)</code>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- </td>
- </tr>
- <tr id="i184" class="altColor">
- <td class="colFirst"><code>static double[]</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">RQDecomp3x3</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qx,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qy)</code>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- </td>
- </tr>
- <tr id="i185" class="rowColor">
- <td class="colFirst"><code>static double[]</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">RQDecomp3x3</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qx,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qy,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qz)</code>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- </td>
- </tr>
- <tr id="i186" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#sampsonDistance-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">sampsonDistance</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F)</code>
- <div class="block">Calculates the Sampson Distance between two points.</div>
- </td>
- </tr>
- <tr id="i187" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solveP3P-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">solveP3P</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</code>
- <div class="block">Finds an object pose from 3 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i188" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnP-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnP</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i189" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnP-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-">solvePnP</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i190" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnP-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-">solvePnP</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int flags)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i191" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">solvePnPGeneric</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i192" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-">solvePnPGeneric</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i193" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-">solvePnPGeneric</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i194" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-org.opencv.core.Mat-">solvePnPGeneric</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i195" class="rowColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnPGeneric</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i196" class="altColor">
- <td class="colFirst"><code>static int</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnPGeneric</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> reprojectionError)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences.</div>
- </td>
- </tr>
- <tr id="i197" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i198" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i199" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i200" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i201" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-double-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError,
- double confidence)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i202" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-double-org.opencv.core.Mat-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError,
- double confidence,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i203" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-double-org.opencv.core.Mat-int-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError,
- double confidence,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int flags)</code>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.</div>
- </td>
- </tr>
- <tr id="i204" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</code> </td>
- </tr>
- <tr id="i205" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">solvePnPRansac</a></span>(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</code> </td>
- </tr>
- <tr id="i206" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRefineLM-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnPRefineLM</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</code>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.</div>
- </td>
- </tr>
- <tr id="i207" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRefineLM-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">solvePnPRefineLM</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.</div>
- </td>
- </tr>
- <tr id="i208" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRefineVVS-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">solvePnPRefineVVS</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</code>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.</div>
- </td>
- </tr>
- <tr id="i209" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRefineVVS-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">solvePnPRefineVVS</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.</div>
- </td>
- </tr>
- <tr id="i210" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#solvePnPRefineVVS-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-double-">solvePnPRefineVVS</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria,
- double VVSlambda)</code>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.</div>
- </td>
- </tr>
- <tr id="i211" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F)</code> </td>
- </tr>
- <tr id="i212" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- int flags)</code> </td>
- </tr>
- <tr id="i213" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code> </td>
- </tr>
- <tr id="i214" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</code> </td>
- </tr>
- <tr id="i215" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</code> </td>
- </tr>
- <tr id="i216" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">stereoCalibrate</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code> </td>
- </tr>
- <tr id="i217" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrateExtended-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-">stereoCalibrateExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</code>
- <div class="block">Calibrates a stereo camera set up.</div>
- </td>
- </tr>
- <tr id="i218" class="altColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrateExtended-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-">stereoCalibrateExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</code>
- <div class="block">Calibrates a stereo camera set up.</div>
- </td>
- </tr>
- <tr id="i219" class="rowColor">
- <td class="colFirst"><code>static double</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoCalibrateExtended-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">stereoCalibrateExtended</a></span>(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block">Calibrates a stereo camera set up.</div>
- </td>
- </tr>
- <tr id="i220" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q)</code>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i221" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags)</code>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i222" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha)</code>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i223" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-org.opencv.core.Size-">stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize)</code>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i224" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-org.opencv.core.Size-org.opencv.core.Rect-">stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI1)</code>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i225" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-org.opencv.core.Size-org.opencv.core.Rect-org.opencv.core.Rect-">stereoRectify</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI1,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI2)</code>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i226" class="altColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectifyUncalibrated-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-">stereoRectifyUncalibrated</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H2)</code>
- <div class="block">Computes a rectification transform for an uncalibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i227" class="rowColor">
- <td class="colFirst"><code>static boolean</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#stereoRectifyUncalibrated-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-">stereoRectifyUncalibrated</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H2,
- double threshold)</code>
- <div class="block">Computes a rectification transform for an uncalibrated stereo camera.</div>
- </td>
- </tr>
- <tr id="i228" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#triangulatePoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">triangulatePoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projPoints1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projPoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points4D)</code>
- <div class="block">This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
- their observations with a stereo camera.</div>
- </td>
- </tr>
- <tr id="i229" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistort-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">undistort</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs)</code>
- <div class="block">Transforms an image to compensate for lens distortion.</div>
- </td>
- </tr>
- <tr id="i230" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistort-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">undistort</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newCameraMatrix)</code>
- <div class="block">Transforms an image to compensate for lens distortion.</div>
- </td>
- </tr>
- <tr id="i231" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistortImagePoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">undistortImagePoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs)</code>
- <div class="block">Compute undistorted image points position</div>
- </td>
- </tr>
- <tr id="i232" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistortImagePoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">undistortImagePoints</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> arg1)</code>
- <div class="block">Compute undistorted image points position</div>
- </td>
- </tr>
- <tr id="i233" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistortPoints-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.Mat-">undistortPoints</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> src,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs)</code>
- <div class="block">Computes the ideal point coordinates from the observed point coordinates.</div>
- </td>
- </tr>
- <tr id="i234" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistortPoints-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">undistortPoints</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> src,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R)</code>
- <div class="block">Computes the ideal point coordinates from the observed point coordinates.</div>
- </td>
- </tr>
- <tr id="i235" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistortPoints-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">undistortPoints</a></span>(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> src,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P)</code>
- <div class="block">Computes the ideal point coordinates from the observed point coordinates.</div>
- </td>
- </tr>
- <tr id="i236" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#undistortPointsIter-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">undistortPointsIter</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</code>
- <div class="block"><b>Note:</b> Default version of #undistortPoints does 5 iterations to compute undistorted points.</div>
- </td>
- </tr>
- <tr id="i237" class="rowColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#validateDisparity-org.opencv.core.Mat-org.opencv.core.Mat-int-int-">validateDisparity</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cost,
- int minDisparity,
- int numberOfDisparities)</code> </td>
- </tr>
- <tr id="i238" class="altColor">
- <td class="colFirst"><code>static void</code></td>
- <td class="colLast"><code><span class="memberNameLink"><a href="../../../org/opencv/calib3d/Calib3d.html#validateDisparity-org.opencv.core.Mat-org.opencv.core.Mat-int-int-int-">validateDisparity</a></span>(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cost,
- int minDisparity,
- int numberOfDisparities,
- int disp12MaxDisp)</code> </td>
- </tr>
- </table>
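- <div class="block">A minimal usage sketch for the pose-refinement entries above: an initial pose is estimated with
- <code>solvePnP</code> and then refined in place with <code>solvePnPRefineVVS</code>. The correspondences, the intrinsics, and the
- class name <code>PoseRefineSketch</code> are placeholder assumptions chosen only for illustration.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class PoseRefineSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-
-         // Hypothetical 3D-2D correspondences (four coplanar model points and their detections).
-         MatOfPoint3f objectPoints = new MatOfPoint3f(
-                 new Point3(0, 0, 0), new Point3(0.1, 0, 0),
-                 new Point3(0.1, 0.1, 0), new Point3(0, 0.1, 0));
-         MatOfPoint2f imagePoints = new MatOfPoint2f(
-                 new Point(300, 220), new Point(380, 225),
-                 new Point(375, 305), new Point(295, 300));
-
-         // Placeholder intrinsics; in practice these come from a camera calibration.
-         Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
-         cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);
-         cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);
-         MatOfDouble distCoeffs = new MatOfDouble(0, 0, 0, 0, 0);
-
-         // Initial estimate, then virtual visual servoing refinement of rvec/tvec.
-         Mat rvec = new Mat();
-         Mat tvec = new Mat();
-         Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
-         Calib3d.solvePnPRefineVVS(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
-
-         System.out.println("rvec = " + rvec.dump());
-         System.out.println("tvec = " + tvec.dump());
-     }
- }
- </pre>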
- <ul class="blockList">
- <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
- <!-- -->
- </a>
- <h3>Methods inherited from class java.lang.Object</h3>
- <code>equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li>
- </ul>
- </li>
- </ul>
- </li>
- </ul>
- </div>
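- <div class="block">A minimal sketch of the <code>stereoRectify</code> overloads summarized above, assuming the intrinsics and the
- stereo extrinsics are already known (in practice they come from <code>stereoCalibrate</code>). All numeric values and the class
- name <code>StereoRectifySketch</code> are placeholder assumptions.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class StereoRectifySketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-
-         // Placeholder intrinsics for two identical cameras.
-         Mat cameraMatrix1 = Mat.eye(3, 3, CvType.CV_64F);
-         cameraMatrix1.put(0, 0, 800); cameraMatrix1.put(1, 1, 800);
-         cameraMatrix1.put(0, 2, 320); cameraMatrix1.put(1, 2, 240);
-         Mat cameraMatrix2 = cameraMatrix1.clone();
-         Mat distCoeffs1 = Mat.zeros(1, 5, CvType.CV_64F);
-         Mat distCoeffs2 = Mat.zeros(1, 5, CvType.CV_64F);
-         Size imageSize = new Size(640, 480);
-
-         // Placeholder extrinsics: the second camera translated 6 cm along x, no rotation.
-         Mat R = Mat.eye(3, 3, CvType.CV_64F);
-         Mat T = new Mat(3, 1, CvType.CV_64F);
-         T.put(0, 0, -0.06, 0, 0);
-
-         // Outputs: per-camera rectification rotations, projection matrices, and the disparity-to-depth matrix Q.
-         Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
-         Calib3d.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-                 imageSize, R, T, R1, R2, P1, P2, Q,
-                 Calib3d.CALIB_ZERO_DISPARITY, 0 /* alpha: keep only valid pixels */);
-
-         System.out.println("P1 = " + P1.dump());
-         System.out.println("P2 = " + P2.dump());
-         System.out.println("Q  = " + Q.dump());
-     }
- }
- </pre>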
- <div class="details">
- <ul class="blockList">
- <li class="blockList">
- <!-- ============ FIELD DETAIL =========== -->
- <ul class="blockList">
- <li class="blockList"><a name="field.detail">
- <!-- -->
- </a>
- <h3>Field Detail</h3>
- <a name="CALIB_CB_ACCURACY">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_ACCURACY</h4>
- <pre>public static final int CALIB_CB_ACCURACY</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_ACCURACY">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_ADAPTIVE_THRESH">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_ADAPTIVE_THRESH</h4>
- <pre>public static final int CALIB_CB_ADAPTIVE_THRESH</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_ADAPTIVE_THRESH">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_ASYMMETRIC_GRID">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_ASYMMETRIC_GRID</h4>
- <pre>public static final int CALIB_CB_ASYMMETRIC_GRID</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_ASYMMETRIC_GRID">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_CLUSTERING">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_CLUSTERING</h4>
- <pre>public static final int CALIB_CB_CLUSTERING</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_CLUSTERING">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_EXHAUSTIVE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_EXHAUSTIVE</h4>
- <pre>public static final int CALIB_CB_EXHAUSTIVE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_EXHAUSTIVE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_FAST_CHECK">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_FAST_CHECK</h4>
- <pre>public static final int CALIB_CB_FAST_CHECK</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_FAST_CHECK">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_FILTER_QUADS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_FILTER_QUADS</h4>
- <pre>public static final int CALIB_CB_FILTER_QUADS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_FILTER_QUADS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_LARGER">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_LARGER</h4>
- <pre>public static final int CALIB_CB_LARGER</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_LARGER">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_MARKER">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_MARKER</h4>
- <pre>public static final int CALIB_CB_MARKER</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_MARKER">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_NORMALIZE_IMAGE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_NORMALIZE_IMAGE</h4>
- <pre>public static final int CALIB_CB_NORMALIZE_IMAGE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_NORMALIZE_IMAGE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_CB_SYMMETRIC_GRID">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_CB_SYMMETRIC_GRID</h4>
- <pre>public static final int CALIB_CB_SYMMETRIC_GRID</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_CB_SYMMETRIC_GRID">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_ASPECT_RATIO">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_ASPECT_RATIO</h4>
- <pre>public static final int CALIB_FIX_ASPECT_RATIO</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_ASPECT_RATIO">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_FOCAL_LENGTH">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_FOCAL_LENGTH</h4>
- <pre>public static final int CALIB_FIX_FOCAL_LENGTH</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_FOCAL_LENGTH">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_INTRINSIC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_INTRINSIC</h4>
- <pre>public static final int CALIB_FIX_INTRINSIC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_INTRINSIC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_K1">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_K1</h4>
- <pre>public static final int CALIB_FIX_K1</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_K1">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_K2">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_K2</h4>
- <pre>public static final int CALIB_FIX_K2</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_K2">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_K3">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_K3</h4>
- <pre>public static final int CALIB_FIX_K3</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_K3">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_K4">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_K4</h4>
- <pre>public static final int CALIB_FIX_K4</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_K4">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_K5">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_K5</h4>
- <pre>public static final int CALIB_FIX_K5</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_K5">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_K6">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_K6</h4>
- <pre>public static final int CALIB_FIX_K6</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_K6">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_PRINCIPAL_POINT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_PRINCIPAL_POINT</h4>
- <pre>public static final int CALIB_FIX_PRINCIPAL_POINT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_PRINCIPAL_POINT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_S1_S2_S3_S4">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_S1_S2_S3_S4</h4>
- <pre>public static final int CALIB_FIX_S1_S2_S3_S4</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_S1_S2_S3_S4">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_TANGENT_DIST">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_TANGENT_DIST</h4>
- <pre>public static final int CALIB_FIX_TANGENT_DIST</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_TANGENT_DIST">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_FIX_TAUX_TAUY">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_FIX_TAUX_TAUY</h4>
- <pre>public static final int CALIB_FIX_TAUX_TAUY</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_FIX_TAUX_TAUY">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_HAND_EYE_ANDREFF">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_HAND_EYE_ANDREFF</h4>
- <pre>public static final int CALIB_HAND_EYE_ANDREFF</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_HAND_EYE_ANDREFF">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_HAND_EYE_DANIILIDIS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_HAND_EYE_DANIILIDIS</h4>
- <pre>public static final int CALIB_HAND_EYE_DANIILIDIS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_HAND_EYE_DANIILIDIS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_HAND_EYE_HORAUD">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_HAND_EYE_HORAUD</h4>
- <pre>public static final int CALIB_HAND_EYE_HORAUD</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_HAND_EYE_HORAUD">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_HAND_EYE_PARK">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_HAND_EYE_PARK</h4>
- <pre>public static final int CALIB_HAND_EYE_PARK</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_HAND_EYE_PARK">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_HAND_EYE_TSAI">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_HAND_EYE_TSAI</h4>
- <pre>public static final int CALIB_HAND_EYE_TSAI</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_HAND_EYE_TSAI">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_NINTRINSIC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_NINTRINSIC</h4>
- <pre>public static final int CALIB_NINTRINSIC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_NINTRINSIC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_RATIONAL_MODEL">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_RATIONAL_MODEL</h4>
- <pre>public static final int CALIB_RATIONAL_MODEL</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_RATIONAL_MODEL">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_ROBOT_WORLD_HAND_EYE_LI">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_ROBOT_WORLD_HAND_EYE_LI</h4>
- <pre>public static final int CALIB_ROBOT_WORLD_HAND_EYE_LI</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_ROBOT_WORLD_HAND_EYE_LI">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_ROBOT_WORLD_HAND_EYE_SHAH">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_ROBOT_WORLD_HAND_EYE_SHAH</h4>
- <pre>public static final int CALIB_ROBOT_WORLD_HAND_EYE_SHAH</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_ROBOT_WORLD_HAND_EYE_SHAH">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_SAME_FOCAL_LENGTH">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_SAME_FOCAL_LENGTH</h4>
- <pre>public static final int CALIB_SAME_FOCAL_LENGTH</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_SAME_FOCAL_LENGTH">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_THIN_PRISM_MODEL">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_THIN_PRISM_MODEL</h4>
- <pre>public static final int CALIB_THIN_PRISM_MODEL</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_THIN_PRISM_MODEL">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_TILTED_MODEL">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_TILTED_MODEL</h4>
- <pre>public static final int CALIB_TILTED_MODEL</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_TILTED_MODEL">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_USE_EXTRINSIC_GUESS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_USE_EXTRINSIC_GUESS</h4>
- <pre>public static final int CALIB_USE_EXTRINSIC_GUESS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_USE_EXTRINSIC_GUESS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_USE_INTRINSIC_GUESS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_USE_INTRINSIC_GUESS</h4>
- <pre>public static final int CALIB_USE_INTRINSIC_GUESS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_USE_INTRINSIC_GUESS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_USE_LU">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_USE_LU</h4>
- <pre>public static final int CALIB_USE_LU</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_USE_LU">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_USE_QR">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_USE_QR</h4>
- <pre>public static final int CALIB_USE_QR</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_USE_QR">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_ZERO_DISPARITY">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_ZERO_DISPARITY</h4>
- <pre>public static final int CALIB_ZERO_DISPARITY</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_ZERO_DISPARITY">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CALIB_ZERO_TANGENT_DIST">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CALIB_ZERO_TANGENT_DIST</h4>
- <pre>public static final int CALIB_ZERO_TANGENT_DIST</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CALIB_ZERO_TANGENT_DIST">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CirclesGridFinderParameters_ASYMMETRIC_GRID">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CirclesGridFinderParameters_ASYMMETRIC_GRID</h4>
- <pre>public static final int CirclesGridFinderParameters_ASYMMETRIC_GRID</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CirclesGridFinderParameters_ASYMMETRIC_GRID">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CirclesGridFinderParameters_SYMMETRIC_GRID">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CirclesGridFinderParameters_SYMMETRIC_GRID</h4>
- <pre>public static final int CirclesGridFinderParameters_SYMMETRIC_GRID</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CirclesGridFinderParameters_SYMMETRIC_GRID">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="COV_POLISHER">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>COV_POLISHER</h4>
- <pre>public static final int COV_POLISHER</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.COV_POLISHER">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CV_DLS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CV_DLS</h4>
- <pre>public static final int CV_DLS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CV_DLS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CV_EPNP">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CV_EPNP</h4>
- <pre>public static final int CV_EPNP</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CV_EPNP">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CV_ITERATIVE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CV_ITERATIVE</h4>
- <pre>public static final int CV_ITERATIVE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CV_ITERATIVE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CV_P3P">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CV_P3P</h4>
- <pre>public static final int CV_P3P</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CV_P3P">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CvLevMarq_CALC_J">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CvLevMarq_CALC_J</h4>
- <pre>public static final int CvLevMarq_CALC_J</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CvLevMarq_CALC_J">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CvLevMarq_CHECK_ERR">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CvLevMarq_CHECK_ERR</h4>
- <pre>public static final int CvLevMarq_CHECK_ERR</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CvLevMarq_CHECK_ERR">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CvLevMarq_DONE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CvLevMarq_DONE</h4>
- <pre>public static final int CvLevMarq_DONE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CvLevMarq_DONE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="CvLevMarq_STARTED">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>CvLevMarq_STARTED</h4>
- <pre>public static final int CvLevMarq_STARTED</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.CvLevMarq_STARTED">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_CHECK_COND">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_CHECK_COND</h4>
- <pre>public static final int fisheye_CALIB_CHECK_COND</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_CHECK_COND">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_FOCAL_LENGTH">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_FOCAL_LENGTH</h4>
- <pre>public static final int fisheye_CALIB_FIX_FOCAL_LENGTH</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_FOCAL_LENGTH">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_INTRINSIC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_INTRINSIC</h4>
- <pre>public static final int fisheye_CALIB_FIX_INTRINSIC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_INTRINSIC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_K1">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_K1</h4>
- <pre>public static final int fisheye_CALIB_FIX_K1</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_K1">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_K2">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_K2</h4>
- <pre>public static final int fisheye_CALIB_FIX_K2</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_K2">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_K3">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_K3</h4>
- <pre>public static final int fisheye_CALIB_FIX_K3</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_K3">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_K4">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_K4</h4>
- <pre>public static final int fisheye_CALIB_FIX_K4</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_K4">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_PRINCIPAL_POINT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_PRINCIPAL_POINT</h4>
- <pre>public static final int fisheye_CALIB_FIX_PRINCIPAL_POINT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_PRINCIPAL_POINT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_FIX_SKEW">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_FIX_SKEW</h4>
- <pre>public static final int fisheye_CALIB_FIX_SKEW</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_FIX_SKEW">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_RECOMPUTE_EXTRINSIC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_RECOMPUTE_EXTRINSIC</h4>
- <pre>public static final int fisheye_CALIB_RECOMPUTE_EXTRINSIC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_USE_INTRINSIC_GUESS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_USE_INTRINSIC_GUESS</h4>
- <pre>public static final int fisheye_CALIB_USE_INTRINSIC_GUESS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_USE_INTRINSIC_GUESS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_CALIB_ZERO_DISPARITY">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_CALIB_ZERO_DISPARITY</h4>
- <pre>public static final int fisheye_CALIB_ZERO_DISPARITY</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.fisheye_CALIB_ZERO_DISPARITY">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="FM_7POINT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>FM_7POINT</h4>
- <pre>public static final int FM_7POINT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.FM_7POINT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="FM_8POINT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>FM_8POINT</h4>
- <pre>public static final int FM_8POINT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.FM_8POINT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="FM_LMEDS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>FM_LMEDS</h4>
- <pre>public static final int FM_LMEDS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.FM_LMEDS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="FM_RANSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>FM_RANSAC</h4>
- <pre>public static final int FM_RANSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.FM_RANSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LMEDS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LMEDS</h4>
- <pre>public static final int LMEDS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LMEDS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LOCAL_OPTIM_GC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LOCAL_OPTIM_GC</h4>
- <pre>public static final int LOCAL_OPTIM_GC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LOCAL_OPTIM_GC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LOCAL_OPTIM_INNER_AND_ITER_LO">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LOCAL_OPTIM_INNER_AND_ITER_LO</h4>
- <pre>public static final int LOCAL_OPTIM_INNER_AND_ITER_LO</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LOCAL_OPTIM_INNER_AND_ITER_LO">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LOCAL_OPTIM_INNER_LO">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LOCAL_OPTIM_INNER_LO</h4>
- <pre>public static final int LOCAL_OPTIM_INNER_LO</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LOCAL_OPTIM_INNER_LO">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LOCAL_OPTIM_NULL">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LOCAL_OPTIM_NULL</h4>
- <pre>public static final int LOCAL_OPTIM_NULL</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LOCAL_OPTIM_NULL">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LOCAL_OPTIM_SIGMA">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LOCAL_OPTIM_SIGMA</h4>
- <pre>public static final int LOCAL_OPTIM_SIGMA</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LOCAL_OPTIM_SIGMA">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="LSQ_POLISHER">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>LSQ_POLISHER</h4>
- <pre>public static final int LSQ_POLISHER</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.LSQ_POLISHER">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="MAGSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>MAGSAC</h4>
- <pre>public static final int MAGSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.MAGSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="NEIGH_FLANN_KNN">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>NEIGH_FLANN_KNN</h4>
- <pre>public static final int NEIGH_FLANN_KNN</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.NEIGH_FLANN_KNN">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="NEIGH_FLANN_RADIUS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>NEIGH_FLANN_RADIUS</h4>
- <pre>public static final int NEIGH_FLANN_RADIUS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.NEIGH_FLANN_RADIUS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="NEIGH_GRID">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>NEIGH_GRID</h4>
- <pre>public static final int NEIGH_GRID</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.NEIGH_GRID">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="NONE_POLISHER">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>NONE_POLISHER</h4>
- <pre>public static final int NONE_POLISHER</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.NONE_POLISHER">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="PROJ_SPHERICAL_EQRECT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>PROJ_SPHERICAL_EQRECT</h4>
- <pre>public static final int PROJ_SPHERICAL_EQRECT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.PROJ_SPHERICAL_EQRECT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="PROJ_SPHERICAL_ORTHO">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>PROJ_SPHERICAL_ORTHO</h4>
- <pre>public static final int PROJ_SPHERICAL_ORTHO</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.PROJ_SPHERICAL_ORTHO">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="RANSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>RANSAC</h4>
- <pre>public static final int RANSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.RANSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="RHO">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>RHO</h4>
- <pre>public static final int RHO</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.RHO">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SAMPLING_NAPSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SAMPLING_NAPSAC</h4>
- <pre>public static final int SAMPLING_NAPSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SAMPLING_NAPSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SAMPLING_PROGRESSIVE_NAPSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SAMPLING_PROGRESSIVE_NAPSAC</h4>
- <pre>public static final int SAMPLING_PROGRESSIVE_NAPSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SAMPLING_PROGRESSIVE_NAPSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SAMPLING_PROSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SAMPLING_PROSAC</h4>
- <pre>public static final int SAMPLING_PROSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SAMPLING_PROSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SAMPLING_UNIFORM">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SAMPLING_UNIFORM</h4>
- <pre>public static final int SAMPLING_UNIFORM</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SAMPLING_UNIFORM">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SCORE_METHOD_LMEDS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SCORE_METHOD_LMEDS</h4>
- <pre>public static final int SCORE_METHOD_LMEDS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SCORE_METHOD_LMEDS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SCORE_METHOD_MAGSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SCORE_METHOD_MAGSAC</h4>
- <pre>public static final int SCORE_METHOD_MAGSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SCORE_METHOD_MAGSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SCORE_METHOD_MSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SCORE_METHOD_MSAC</h4>
- <pre>public static final int SCORE_METHOD_MSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SCORE_METHOD_MSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SCORE_METHOD_RANSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SCORE_METHOD_RANSAC</h4>
- <pre>public static final int SCORE_METHOD_RANSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SCORE_METHOD_RANSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_AP3P">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_AP3P</h4>
- <pre>public static final int SOLVEPNP_AP3P</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_AP3P">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_DLS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_DLS</h4>
- <pre>public static final int SOLVEPNP_DLS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_DLS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_EPNP">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_EPNP</h4>
- <pre>public static final int SOLVEPNP_EPNP</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_EPNP">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_IPPE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_IPPE</h4>
- <pre>public static final int SOLVEPNP_IPPE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_IPPE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_IPPE_SQUARE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_IPPE_SQUARE</h4>
- <pre>public static final int SOLVEPNP_IPPE_SQUARE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_IPPE_SQUARE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_ITERATIVE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_ITERATIVE</h4>
- <pre>public static final int SOLVEPNP_ITERATIVE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_ITERATIVE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_MAX_COUNT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_MAX_COUNT</h4>
- <pre>public static final int SOLVEPNP_MAX_COUNT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_MAX_COUNT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_P3P">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_P3P</h4>
- <pre>public static final int SOLVEPNP_P3P</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_P3P">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_SQPNP">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_SQPNP</h4>
- <pre>public static final int SOLVEPNP_SQPNP</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_SQPNP">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="SOLVEPNP_UPNP">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>SOLVEPNP_UPNP</h4>
- <pre>public static final int SOLVEPNP_UPNP</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.SOLVEPNP_UPNP">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_ACCURATE">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>USAC_ACCURATE</h4>
- <pre>public static final int USAC_ACCURATE</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_ACCURATE">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_DEFAULT">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>USAC_DEFAULT</h4>
- <pre>public static final int USAC_DEFAULT</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_DEFAULT">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_FAST">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>USAC_FAST</h4>
- <pre>public static final int USAC_FAST</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_FAST">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_FM_8PTS">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>USAC_FM_8PTS</h4>
- <pre>public static final int USAC_FM_8PTS</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_FM_8PTS">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_MAGSAC">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>USAC_MAGSAC</h4>
- <pre>public static final int USAC_MAGSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_MAGSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_PARALLEL">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>USAC_PARALLEL</h4>
- <pre>public static final int USAC_PARALLEL</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_PARALLEL">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- <a name="USAC_PROSAC">
- <!-- -->
- </a>
- <ul class="blockListLast">
- <li class="blockList">
- <h4>USAC_PROSAC</h4>
- <pre>public static final int USAC_PROSAC</pre>
- <dl>
- <dt><span class="seeLabel">See Also:</span></dt>
- <dd><a href="../../../constant-values.html#org.opencv.calib3d.Calib3d.USAC_PROSAC">Constant Field Values</a></dd>
- </dl>
- </li>
- </ul>
- </li>
- </ul>
- <!-- ========= CONSTRUCTOR DETAIL ======== -->
- <ul class="blockList">
- <li class="blockList"><a name="constructor.detail">
- <!-- -->
- </a>
- <h3>Constructor Detail</h3>
- <a name="Calib3d--">
- <!-- -->
- </a>
- <ul class="blockListLast">
- <li class="blockList">
- <h4>Calib3d</h4>
- <pre>public Calib3d()</pre>
- </li>
- </ul>
- </li>
- </ul>
- <!-- ============ METHOD DETAIL ========== -->
- <ul class="blockList">
- <li class="blockList"><a name="method.detail">
- <!-- -->
- </a>
- <h3>Method Detail</h3>
- <a name="calibrateCamera-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCamera</h4>
- <pre>public static double calibrateCamera(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</pre>
- </li>
- </ul>
- <a name="calibrateCamera-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCamera</h4>
- <pre>public static double calibrateCamera(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</pre>
- </li>
- </ul>
- <a name="calibrateCamera-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCamera</h4>
- <pre>public static double calibrateCamera(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- </li>
- </ul>
- <a name="calibrateCameraExtended-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraExtended</h4>
- <pre>public static double calibrateCameraExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</pre>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- pattern.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - In the new interface it is a vector of vectors of calibration pattern points in
- the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
- vector contains as many elements as the number of pattern views. If the same calibration pattern
- is shown in each view and it is fully visible, all the vectors will be the same. However, it is also
- possible to use partially occluded patterns or even different patterns in different views; then,
- the vectors will differ. Although the points are 3D, they all lie in the calibration pattern's
- XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
- In the old interface all the vectors of object points from different views are concatenated
- together.</dd>
- <dd><code>imagePoints</code> - In the new interface it is a vector of vectors of the projections of calibration
- pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
- objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
- respectively. In the old interface all the vectors of image points from different views are
- concatenated together.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the camera intrinsic matrix.</dd>
- <dd><code>cameraMatrix</code> - Input/output 3x3 floating-point camera intrinsic matrix
- \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
- and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
- are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.</dd>
- <dd><code>distCoeffs</code> - Input/output vector of distortion coefficients
- \(\distcoeffs\).</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
- (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
- i-th translation vector (see the next output parameter description) brings the calibration pattern
- from the object coordinate space (in which object points are specified) to the camera coordinate
- space. In more technical terms, the tuple of the i-th rotation and translation vector performs
- a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
- tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
- space.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter
- description above.</dd>
- <dd><code>stdDeviationsIntrinsics</code> - Output vector of standard deviations estimated for intrinsic
- parameters. Order of deviations values:
- \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- s_4, \tau_x, \tau_y)\) If one of the parameters is not estimated, its deviation is equal to zero.</dd>
- <dd><code>stdDeviationsExtrinsics</code> - Output vector of standard deviations estimated for extrinsic
- parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
- the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.
- <ul>
- <li>
- REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- Note that if intrinsic parameters are known, there is no need to use this function just to
- estimate extrinsic parameters. Use REF: solvePnP instead.
- </li>
- <li>
- REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- optimization. It stays at the center or at a different location specified when
- REF: CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- <li>
- REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
- ratio fx/fy stays the same as in the input cameraMatrix . When
- REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
- ignored, only their ratio is computed and used further.
- </li>
- <li>
- REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
- to zeros and stay zero.
- </li>
- <li>
- REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
- REF: CALIB_USE_INTRINSIC_GUESS is set.
- </li>
- <li>
- REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
- coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
- set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the rational model and return 8 coefficients or more.
- </li>
- <li>
- REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the thin prism model and return 12 coefficients or more.
- </li>
- <li>
- REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the tilted sensor model and return 14 coefficients.
- </li>
- <li>
- REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the overall RMS re-projection error.
- The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
- points and their corresponding 2D projections in each view must be specified. That may be achieved
- by using an object with known geometry and easily detectable feature points. Such an object is
- called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
- a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
- parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
- patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
- be used as long as initial cameraMatrix is provided.
- The algorithm performs the following steps:
- <ul>
- <li>
- Compute the initial intrinsic parameters (the option only available for planar calibration
- patterns) or read them from the input parameters. The distortion coefficients are all set to
- zeros initially unless some of CALIB_FIX_K? are specified.
- </li>
- </ul>
- <ul>
- <li>
- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
- done using REF: solvePnP .
- </li>
- </ul>
- <ul>
- <li>
- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- that is, the total sum of squared distances between the observed feature points imagePoints and
- the projected (using the current estimates for camera parameters and the poses) object points
- objectPoints. See REF: projectPoints for details.
- </li>
- </ul>
- <b>Note:</b>
- If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
- and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
- \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
- \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
- SEE:
- calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- undistort</dd>
- </dl>
- </li>
- </ul>
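- <div class="block"><b>Editor's note:</b> a minimal Java usage sketch for the variant above (illustrative
- only, not part of the generated API documentation). It assumes the OpenCV native library is already
- loaded and that objectPoints and imagePoints hold one Mat per calibration view, for example collected
- with findChessboardCorners; all other names are local placeholders.</div>
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
- import org.opencv.core.Size;
-
- public class CalibrateExample {
-     // Returns the overall RMS re-projection error reported by calibrateCameraExtended.
-     public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize) {
-         Mat cameraMatrix = new Mat();          // receives the 3x3 intrinsic matrix
-         Mat distCoeffs   = new Mat();          // receives the distortion coefficients
-         List<Mat> rvecs  = new ArrayList<>();  // one rotation vector per view
-         List<Mat> tvecs  = new ArrayList<>();  // one translation vector per view
-         Mat stdDevIntrinsics = new Mat();      // standard deviations of the intrinsics
-         Mat stdDevExtrinsics = new Mat();      // standard deviations of the per-view R, T
-         Mat perViewErrors    = new Mat();      // per-view RMS re-projection error
-         return Calib3d.calibrateCameraExtended(objectPoints, imagePoints, imageSize,
-                 cameraMatrix, distCoeffs, rvecs, tvecs,
-                 stdDevIntrinsics, stdDevExtrinsics, perViewErrors);
-     }
- }
- </pre>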
- <a name="calibrateCameraExtended-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraExtended</h4>
- <pre>public static double calibrateCameraExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</pre>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- pattern.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - In the new interface it is a vector of vectors of calibration pattern points in
- the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
- vector contains as many elements as the number of pattern views. If the same calibration pattern
- is shown in each view and it is fully visible, all the vectors will be the same. However, it is also
- possible to use partially occluded patterns or even different patterns in different views; then,
- the vectors will differ. Although the points are 3D, they all lie in the calibration pattern's
- XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
- In the old interface all the vectors of object points from different views are concatenated
- together.</dd>
- <dd><code>imagePoints</code> - In the new interface it is a vector of vectors of the projections of calibration
- pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
- objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
- respectively. In the old interface all the vectors of image points from different views are
- concatenated together.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the camera intrinsic matrix.</dd>
- <dd><code>cameraMatrix</code> - Input/output 3x3 floating-point camera intrinsic matrix
- \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
- and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
- are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.</dd>
- <dd><code>distCoeffs</code> - Input/output vector of distortion coefficients
- \(\distcoeffs\).</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
- (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
- i-th translation vector (see the next output parameter description) brings the calibration pattern
- from the object coordinate space (in which object points are specified) to the camera coordinate
- space. In more technical terms, the tuple of the i-th rotation and translation vector performs
- a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
- tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
- space.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter
- description above.</dd>
- <dd><code>stdDeviationsIntrinsics</code> - Output vector of standard deviations estimated for intrinsic
- parameters. Order of deviations values:
- \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- s_4, \tau_x, \tau_y)\) If one of the parameters is not estimated, its deviation is equal to zero.</dd>
- <dd><code>stdDeviationsExtrinsics</code> - Output vector of standard deviations estimated for extrinsic
- parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
- the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- Note that if intrinsic parameters are known, there is no need to use this function just to
- estimate extrinsic parameters. Use REF: solvePnP instead.
- </li>
- <li>
- REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- optimization. It stays at the center or at a different location specified when
- REF: CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- <li>
- REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
- ratio fx/fy stays the same as in the input cameraMatrix . When
- REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
- ignored, only their ratio is computed and used further.
- </li>
- <li>
- REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
- to zeros and stay zero.
- </li>
- <li>
- REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
- REF: CALIB_USE_INTRINSIC_GUESS is set.
- </li>
- <li>
- REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
- coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
- set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the rational model and return 8 coefficients or more.
- </li>
- <li>
- REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the thin prism model and return 12 coefficients or more.
- </li>
- <li>
- REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the tilted sensor model and return 14 coefficients.
- </li>
- <li>
- REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the overall RMS re-projection error.
- The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
- points and their corresponding 2D projections in each view must be specified. That may be achieved
- by using an object with known geometry and easily detectable feature points. Such an object is
- called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
- a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
- parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
- patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
- be used as long as initial cameraMatrix is provided.
- The algorithm performs the following steps:
- <ul>
- <li>
- Compute the initial intrinsic parameters (the option only available for planar calibration
- patterns) or read them from the input parameters. The distortion coefficients are all set to
- zeros initially unless some of CALIB_FIX_K? are specified.
- </li>
- </ul>
- <ul>
- <li>
- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
- done using REF: solvePnP .
- </li>
- </ul>
- <ul>
- <li>
- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- that is, the total sum of squared distances between the observed feature points imagePoints and
- the projected (using the current estimates for camera parameters and the poses) object points
- objectPoints. See REF: projectPoints for details.
- </li>
- </ul>
- <b>Note:</b>
- If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
- and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
- \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
- \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
- SEE:
- calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- undistort</dd>
- </dl>
- </li>
- </ul>
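- <div class="block"><b>Editor's note:</b> an illustrative continuation of the sketch above using the
- overload that takes <code>flags</code>: here the rational distortion model is enabled and the tangential
- coefficients are forced to zero. The flag constants defined on this class are combined with a bitwise
- OR into a single int. Inputs are assumed to be prepared exactly as in the previous sketch.</div>
- <pre>
-     // Same class as the previous sketch; inputs prepared in the same way.
-     public static double calibrateWithFlags(List<Mat> objectPoints, List<Mat> imagePoints,
-                                             Size imageSize) {
-         Mat cameraMatrix = new Mat();
-         Mat distCoeffs   = new Mat();
-         List<Mat> rvecs  = new ArrayList<>();
-         List<Mat> tvecs  = new ArrayList<>();
-         Mat stdDevIntrinsics = new Mat();
-         Mat stdDevExtrinsics = new Mat();
-         Mat perViewErrors    = new Mat();
-         // Flag values are OR'ed together and passed through the 'flags' parameter.
-         int flags = Calib3d.CALIB_RATIONAL_MODEL | Calib3d.CALIB_ZERO_TANGENT_DIST;
-         return Calib3d.calibrateCameraExtended(objectPoints, imagePoints, imageSize,
-                 cameraMatrix, distCoeffs, rvecs, tvecs,
-                 stdDevIntrinsics, stdDevExtrinsics, perViewErrors, flags);
-     }
- </pre>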
- <a name="calibrateCameraExtended-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraExtended</h4>
- <pre>public static double calibrateCameraExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration
- pattern.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - In the new interface it is a vector of vectors of calibration pattern points in
- the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
- vector contains as many elements as the number of pattern views. If the same calibration pattern
- is shown in each view and it is fully visible, all the vectors will be the same. However, it is also
- possible to use partially occluded patterns or even different patterns in different views; then,
- the vectors will differ. Although the points are 3D, they all lie in the calibration pattern's
- XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
- In the old interface all the vectors of object points from different views are concatenated
- together.</dd>
- <dd><code>imagePoints</code> - In the new interface it is a vector of vectors of the projections of calibration
- pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
- objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
- respectively. In the old interface all the vectors of image points from different views are
- concatenated together.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the camera intrinsic matrix.</dd>
- <dd><code>cameraMatrix</code> - Input/output 3x3 floating-point camera intrinsic matrix
- \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
- and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
- are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.</dd>
- <dd><code>distCoeffs</code> - Input/output vector of distortion coefficients
- \(\distcoeffs\).</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
- (e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
- i-th translation vector (see the next output parameter description) brings the calibration pattern
- from the object coordinate space (in which object points are specified) to the camera coordinate
- space. In more technical terms, the tuple of the i-th rotation and translation vector performs
- a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
- tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
- space.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter
- description above.</dd>
- <dd><code>stdDeviationsIntrinsics</code> - Output vector of standard deviations estimated for intrinsic
- parameters. Order of deviations values:
- \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- s_4, \tau_x, \tau_y)\). If a parameter is not estimated, its deviation is equal to zero.</dd>
- <dd><code>stdDeviationsExtrinsics</code> - Output vector of standard deviations estimated for extrinsic
- parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
- the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- Note that if the intrinsic parameters are known, there is no need to use this function just to
- estimate extrinsic parameters. Use REF: solvePnP instead.
- </li>
- <li>
- REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- optimization. It stays at the center or at a different location specified when
- REF: CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- <li>
- REF: CALIB_FIX_ASPECT_RATIO The function considers only fy as a free parameter. The
- ratio fx/fy stays the same as in the input cameraMatrix . When
- REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
- ignored, only their ratio is computed and used further.
- </li>
- <li>
- REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
- to zeros and stay zero.
- </li>
- <li>
- REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
- REF: CALIB_USE_INTRINSIC_GUESS is set.
- </li>
- <li>
- REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
- coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
- set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the rational model and return 8 coefficients or more.
- </li>
- <li>
- REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the thin prism model and return 12 coefficients or more.
- </li>
- <li>
- REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the tilted sensor model and return 14 coefficients.
- </li>
- <li>
- REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- </ul></dd>
- <dd><code>criteria</code> - Termination criteria for the iterative optimization algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the overall RMS re-projection error.
- The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
- points and their corresponding 2D projections in each view must be specified. That may be achieved
- by using an object with known geometry and easily detectable feature points. Such an object is
- called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
- a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
- parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
- patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
- be used as long as initial cameraMatrix is provided.
- The algorithm performs the following steps:
- <ul>
- <li>
- Compute the initial intrinsic parameters (the option only available for planar calibration
- patterns) or read them from the input parameters. The distortion coefficients are all set to
- zeros initially unless some of CALIB_FIX_K? are specified.
- </li>
- </ul>
- <ul>
- <li>
- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
- done using REF: solvePnP .
- </li>
- </ul>
- <ul>
- <li>
- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- that is, the total sum of squared distances between the observed feature points imagePoints and
- the projected (using the current estimates for camera parameters and the poses) object points
- objectPoints. See REF: projectPoints for details.
- </li>
- </ul>
- <b>Note:</b>
- If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
- and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
- \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
- \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
- SEE:
- calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- undistort</dd>
- </dl>
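- <div class="block"><b>Example (usage sketch):</b> a minimal Java sketch of a typical call, assuming the per-view
- corner detections, the planar pattern geometry and <code>imageSize</code> have already been prepared (e.g. with
- REF: findChessboardCorners, using patternSize = new Size(cols, rows)):</div>
- <pre>
- // imports assumed: org.opencv.core.*, org.opencv.calib3d.Calib3d, java.util.*
- // objectCorners: one MatOfPoint3f with the planar board grid (Z = 0)
- // imageCorners:  one MatOfPoint2f of detected corners per view
- List&lt;Mat&gt; objectPoints = new ArrayList&lt;&gt;();
- List&lt;Mat&gt; imagePoints  = new ArrayList&lt;&gt;();
- for (MatOfPoint2f corners : imageCorners) {
-     objectPoints.add(objectCorners);   // same fully visible pattern in every view
-     imagePoints.add(corners);
- }
- Mat cameraMatrix = new Mat();
- Mat distCoeffs   = new Mat();
- List&lt;Mat&gt; rvecs = new ArrayList&lt;&gt;();
- List&lt;Mat&gt; tvecs = new ArrayList&lt;&gt;();
- Mat stdDevIntrinsics = new Mat(), stdDevExtrinsics = new Mat(), perViewErrors = new Mat();
- TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 30, 1e-6);
- double rms = Calib3d.calibrateCameraExtended(objectPoints, imagePoints, imageSize,
-         cameraMatrix, distCoeffs, rvecs, tvecs,
-         stdDevIntrinsics, stdDevExtrinsics, perViewErrors,
-         Calib3d.CALIB_FIX_K3, criteria);   // illustrative flag choice: keep k3 fixed at 0
- </pre>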
- </li>
- </ul>
- <a name="calibrateCameraRO-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraRO</h4>
- <pre>public static double calibrateCameraRO(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints)</pre>
- </li>
- </ul>
- <a name="calibrateCameraRO-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraRO</h4>
- <pre>public static double calibrateCameraRO(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- int flags)</pre>
- </li>
- </ul>
- <a name="calibrateCameraRO-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraRO</h4>
- <pre>public static double calibrateCameraRO(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- </li>
- </ul>
- <a name="calibrateCameraROExtended-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraROExtended</h4>
- <pre>public static double calibrateCameraROExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</pre>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
- This function is an extension of #calibrateCamera with the method of releasing object which was
- proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
- targets (calibration plates), this method can dramatically improve the precision of the estimated
- camera parameters. Both the object-releasing method and standard method are supported by this
- function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
- #calibrateCamera is a wrapper for this function.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of calibration pattern points in the calibration pattern
- coordinate space. See #calibrateCamera for details. If the method of releasing object is to be used,
- the identical calibration board must be used in each view and it must be fully visible, and all
- objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
- target has to be rigid, or at least static if the camera (rather than the calibration target) is
- shifted for grabbing images.</b></dd>
- <dd><code>imagePoints</code> - Vector of vectors of the projections of calibration pattern points. See
- #calibrateCamera for details.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the intrinsic camera matrix.</dd>
- <dd><code>iFixedPoint</code> - The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
- a switch for calibration method selection. If the object-releasing method is to be used, pass a value
- in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
- calibration method. Fixing the top-right corner point of the calibration board grid is usually
- recommended when the object-releasing method is utilized. According to
- CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
- and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
- newObjPoints are only possible if coordinates of these three fixed points are accurate enough.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 floating-point camera matrix. See #calibrateCamera for details.</dd>
- <dd><code>distCoeffs</code> - Output vector of distortion coefficients. See #calibrateCamera for details.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
- for details.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view.</dd>
- <dd><code>newObjPoints</code> - The updated output vector of calibration pattern points. The coordinates might
- be scaled based on three fixed points. The returned coordinates are accurate only if the above
- mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
- is ignored with standard calibration method.</dd>
- <dd><code>stdDeviationsIntrinsics</code> - Output vector of standard deviations estimated for intrinsic parameters.
- See #calibrateCamera for details.</dd>
- <dd><code>stdDeviationsExtrinsics</code> - Output vector of standard deviations estimated for extrinsic parameters.
- See #calibrateCamera for details.</dd>
- <dd><code>stdDeviationsObjPoints</code> - Output vector of standard deviations estimated for refined coordinates
- of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
- parameter is ignored with standard calibration method.</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view. See
- #calibrateCamera for details. If the method of releasing object is used, the calibration time may
- be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, with potentially
- less precise and less stable results in some rare cases.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the overall RMS re-projection error.
- The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
- #calibrateCamera for other detailed explanations.
- SEE:
- calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort</dd>
- </dl>
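- <div class="block"><b>Example (usage sketch):</b> a minimal Java sketch of the object-releasing calibration,
- assuming <code>objectPoints</code>, <code>imagePoints</code> and <code>imageSize</code> were prepared as for
- #calibrateCamera and the board has <code>cols</code> points per row:</div>
- <pre>
- // imports assumed: org.opencv.core.*, org.opencv.calib3d.Calib3d, java.util.*
- int iFixedPoint = cols - 1;                 // fix the top-right corner point of the first row
- Mat cameraMatrix = new Mat(), distCoeffs = new Mat();
- List&lt;Mat&gt; rvecs = new ArrayList&lt;&gt;(), tvecs = new ArrayList&lt;&gt;();
- Mat newObjPoints = new Mat();
- Mat stdDevIntr = new Mat(), stdDevExtr = new Mat(), stdDevObj = new Mat(), perViewErrors = new Mat();
- double rms = Calib3d.calibrateCameraROExtended(objectPoints, imagePoints, imageSize,
-         iFixedPoint, cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints,
-         stdDevIntr, stdDevExtr, stdDevObj, perViewErrors);
- </pre>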
- </li>
- </ul>
- <a name="calibrateCameraROExtended-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraROExtended</h4>
- <pre>public static double calibrateCameraROExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</pre>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
- This function is an extension of #calibrateCamera with the method of releasing object which was
- proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
- targets (calibration plates), this method can dramatically improve the precision of the estimated
- camera parameters. Both the object-releasing method and standard method are supported by this
- function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
- #calibrateCamera is a wrapper for this function.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of calibration pattern points in the calibration pattern
- coordinate space. See #calibrateCamera for details. If the method of releasing object is to be used,
- the identical calibration board must be used in each view and it must be fully visible, and all
- objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
- target has to be rigid, or at least static if the camera (rather than the calibration target) is
- shifted for grabbing images.</b></dd>
- <dd><code>imagePoints</code> - Vector of vectors of the projections of calibration pattern points. See
- #calibrateCamera for details.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the intrinsic camera matrix.</dd>
- <dd><code>iFixedPoint</code> - The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
- a switch for calibration method selection. If the object-releasing method is to be used, pass a value
- in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
- calibration method. Fixing the top-right corner point of the calibration board grid is usually
- recommended when the object-releasing method is utilized. According to
- CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
- and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
- newObjPoints are only possible if coordinates of these three fixed points are accurate enough.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 floating-point camera matrix. See #calibrateCamera for details.</dd>
- <dd><code>distCoeffs</code> - Output vector of distortion coefficients. See #calibrateCamera for details.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
- for details.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view.</dd>
- <dd><code>newObjPoints</code> - The updated output vector of calibration pattern points. The coordinates might
- be scaled based on three fixed points. The returned coordinates are accurate only if the above
- mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
- is ignored with standard calibration method.</dd>
- <dd><code>stdDeviationsIntrinsics</code> - Output vector of standard deviations estimated for intrinsic parameters.
- See #calibrateCamera for details.</dd>
- <dd><code>stdDeviationsExtrinsics</code> - Output vector of standard deviations estimated for extrinsic parameters.
- See #calibrateCamera for details.</dd>
- <dd><code>stdDeviationsObjPoints</code> - Output vector of standard deviations estimated for refined coordinates
- of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
- parameter is ignored with standard calibration method.</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of some predefined values. See
- #calibrateCamera for details. If the method of releasing object is used, the calibration time may
- be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, with potentially
- less precise and less stable results in some rare cases.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the overall RMS re-projection error.
- The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
- #calibrateCamera for other detailed explanations.
- SEE:
- calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort</dd>
- </dl>
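- <div class="block"><b>Example (usage sketch):</b> the same call as in the previous example with an explicit
- <code>flags</code> value; for instance, CALIB_USE_LU may speed up the longer object-releasing optimization:</div>
- <pre>
- // Sketch: all input/output Mats as in the previous example.
- double rms = Calib3d.calibrateCameraROExtended(objectPoints, imagePoints, imageSize,
-         iFixedPoint, cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints,
-         stdDevIntr, stdDevExtr, stdDevObj, perViewErrors,
-         Calib3d.CALIB_USE_LU);
- </pre>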
- </li>
- </ul>
- <a name="calibrateCameraROExtended-java.util.List-java.util.List-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateCameraROExtended</h4>
- <pre>public static double calibrateCameraROExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- int iFixedPoint,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsIntrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsExtrinsics,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> stdDeviationsObjPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
- This function is an extension of #calibrateCamera with the method of releasing object which was
- proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
- targets (calibration plates), this method can dramatically improve the precision of the estimated
- camera parameters. Both the object-releasing method and standard method are supported by this
- function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
- #calibrateCamera is a wrapper for this function.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of calibration pattern points in the calibration pattern
- coordinate space. See #calibrateCamera for details. If the method of releasing object is to be used,
- the identical calibration board must be used in each view and it must be fully visible, and all
- objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
- target has to be rigid, or at least static if the camera (rather than the calibration target) is
- shifted for grabbing images.</b></dd>
- <dd><code>imagePoints</code> - Vector of vectors of the projections of calibration pattern points. See
- #calibrateCamera for details.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the intrinsic camera matrix.</dd>
- <dd><code>iFixedPoint</code> - The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
- a switch for calibration method selection. If the object-releasing method is to be used, pass a value
- in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
- calibration method. Fixing the top-right corner point of the calibration board grid is usually
- recommended when the object-releasing method is utilized. According to
- CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
- and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
- newObjPoints are only possible if coordinates of these three fixed points are accurate enough.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 floating-point camera matrix. See #calibrateCamera for details.</dd>
- <dd><code>distCoeffs</code> - Output vector of distortion coefficients. See #calibrateCamera for details.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
- for details.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view.</dd>
- <dd><code>newObjPoints</code> - The updated output vector of calibration pattern points. The coordinates might
- be scaled based on three fixed points. The returned coordinates are accurate only if the above
- mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
- is ignored with standard calibration method.</dd>
- <dd><code>stdDeviationsIntrinsics</code> - Output vector of standard deviations estimated for intrinsic parameters.
- See #calibrateCamera for details.</dd>
- <dd><code>stdDeviationsExtrinsics</code> - Output vector of standard deviations estimated for extrinsic parameters.
- See #calibrateCamera for details.</dd>
- <dd><code>stdDeviationsObjPoints</code> - Output vector of standard deviations estimated for refined coordinates
- of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
- parameter is ignored with standard calibration method.</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of some predefined values. See
- #calibrateCamera for details. If the method of releasing object is used, the calibration time may
- be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, with potentially
- less precise and less stable results in some rare cases.</dd>
- <dd><code>criteria</code> - Termination criteria for the iterative optimization algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the overall RMS re-projection error.
- The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
- views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
- #calibrateCamera for other detailed explanations.
- SEE:
- calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort</dd>
- </dl>
- </li>
- </ul>
- <a name="calibrateHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateHandEye</h4>
- <pre>public static void calibrateHandEye(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_target2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_target2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_cam2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_cam2gripper)</pre>
- <div class="block">Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>R_gripper2base</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from gripper frame to robot base frame.</dd>
- <dd><code>t_gripper2base</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from gripper frame to robot base frame.</dd>
- <dd><code>R_target2cam</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from calibration target frame to camera frame.</dd>
- <dd><code>t_target2cam</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from calibration target frame to camera frame.</dd>
- <dd><code>R_cam2gripper</code> - Estimated <code>(3x3)</code> rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).</dd>
- <dd><code>t_cam2gripper</code> - Estimated <code>(3x1)</code> translation part extracted from the homogeneous matrix that transforms a point
- expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
- The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
- rotation then the translation (separable solutions) and the following methods are implemented:
- <ul>
- <li>
- R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
- </li>
- <li>
- F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
- </li>
- <li>
- R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
- </li>
- </ul>
- Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- with the following implemented methods:
- <ul>
- <li>
- N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
- </li>
- <li>
- K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
- </li>
- </ul>
- The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
- mounted on a robot gripper ("hand") has to be estimated. This configuration is called eye-in-hand.
- The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
- end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
- the suitable transformations to the function, see below.
- 
- The calibration procedure is the following:
- <ul>
- <li>
- a static calibration pattern is used to estimate the transformation between the target frame
- and the camera frame
- </li>
- <li>
- the robot gripper is moved in order to acquire several poses
- </li>
- <li>
- for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- instance the robot kinematics
- \(
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- \)
- </li>
- <li>
- for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
- for instance a pose estimation method (PnP) from 2D-3D point correspondences
- \(
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_t\\
- Y_t\\
- Z_t\\
- 1
- \end{bmatrix}
- \)
- </li>
- </ul>
- The Hand-Eye calibration procedure returns the following homogeneous transformation
- \(
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- \)
- This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
- <ul>
- <li>
- for an eye-in-hand configuration
- \(
- \begin{align*}
- ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
- \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- \end{align*}
- \)
- </li>
- <li>
- for an eye-to-hand configuration
- \(
- \begin{align*}
- ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
- \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- \end{align*}
- \)
- </li>
- </ul>
- <b>Note:</b>
- Additional information can be found on this <a href="http://campar.in.tum.de/Chair/HandEyeCalibration">website</a>.
- <b>Note:</b>
- A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
- So at least 3 different poses are required, but it is strongly recommended to use many more poses.</dd>
- </dl>
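- <div class="block"><b>Example (usage sketch):</b> a minimal eye-in-hand Java sketch, assuming the four input lists
- have been filled with one <code>(3x3)</code> rotation (or <code>(3x1)</code> rotation vector) and one <code>(3x1)</code>
- translation per robot pose (at least 3 poses, preferably many more):</div>
- <pre>
- // imports assumed: org.opencv.core.Mat, org.opencv.calib3d.Calib3d, java.util.List
- // R_gripper2base / t_gripper2base: from the robot kinematics for each pose
- // R_target2cam   / t_target2cam:   from e.g. solvePnP on the calibration pattern for each pose
- Mat R_cam2gripper = new Mat();
- Mat t_cam2gripper = new Mat();
- Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base,
-         R_target2cam, t_target2cam,
-         R_cam2gripper, t_cam2gripper);   // estimates the eye-in-hand transformation g_T_c
- </pre>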
- </li>
- </ul>
- <a name="calibrateHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateHandEye</h4>
- <pre>public static void calibrateHandEye(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_gripper2base,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_target2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_target2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_cam2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_cam2gripper,
- int method)</pre>
- <div class="block">Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>R_gripper2base</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from gripper frame to robot base frame.</dd>
- <dd><code>t_gripper2base</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from gripper frame to robot base frame.</dd>
- <dd><code>R_target2cam</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from calibration target frame to camera frame.</dd>
- <dd><code>t_target2cam</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from calibration target frame to camera frame.</dd>
- <dd><code>R_cam2gripper</code> - Estimated <code>(3x3)</code> rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).</dd>
- <dd><code>t_cam2gripper</code> - Estimated <code>(3x1)</code> translation part extracted from the homogeneous matrix that transforms a point
- expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).</dd>
- <dd><code>method</code> - One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
- The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
- rotation then the translation (separable solutions) and the following methods are implemented:
- <ul>
- <li>
- R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
- </li>
- <li>
- F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
- </li>
- <li>
- R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
- </li>
- </ul>
- Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- with the following implemented methods:
- <ul>
- <li>
- N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
- </li>
- <li>
- K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
- </li>
- </ul>
- The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
- mounted on a robot gripper ("hand") has to be estimated. This configuration is called eye-in-hand.
- The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
- end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
- the suitable transformations to the function, see below.
- 
- The calibration procedure is the following:
- <ul>
- <li>
- a static calibration pattern is used to estimate the transformation between the target frame
- and the camera frame
- </li>
- <li>
- the robot gripper is moved in order to acquire several poses
- </li>
- <li>
- for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- instance the robot kinematics
- \(
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- \)
- </li>
- <li>
- for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
- for instance a pose estimation method (PnP) from 2D-3D point correspondences
- \(
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_t\\
- Y_t\\
- Z_t\\
- 1
- \end{bmatrix}
- \)
- </li>
- </ul>
- The Hand-Eye calibration procedure returns the following homogeneous transformation
- \(
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- \)
- This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
- <ul>
- <li>
- for an eye-in-hand configuration
- \(
- \begin{align*}
- ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
- \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- \end{align*}
- \)
- </li>
- <li>
- for an eye-to-hand configuration
- \(
- \begin{align*}
- ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
- (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
- \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
- \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- \end{align*}
- \)
- </li>
- </ul>
- <b>Note:</b>
- Additional information can be found on this <a href="http://campar.in.tum.de/Chair/HandEyeCalibration">website</a>.
- <b>Note:</b>
- A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
- So at least 3 different poses are required, but it is strongly recommended to use many more poses.</dd>
- </dl>
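- <div class="block"><b>Example (usage sketch):</b> the same call as in the previous example with an explicit method
- selection (here CALIB_HAND_EYE_PARK, the Park and Martin separable solution, as an illustrative choice):</div>
- <pre>
- // Sketch: R_cam2gripper, t_cam2gripper and the input lists as in the previous example.
- Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base,
-         R_target2cam, t_target2cam,
-         R_cam2gripper, t_cam2gripper,
-         Calib3d.CALIB_HAND_EYE_PARK);
- </pre>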
- </li>
- </ul>
- <a name="calibrateRobotWorldHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateRobotWorldHandEye</h4>
- <pre>public static void calibrateRobotWorldHandEye(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_base2gripper,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_base2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_gripper2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_gripper2cam)</pre>
- <div class="block">Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>R_world2cam</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from world frame to the camera frame.</dd>
- <dd><code>t_world2cam</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from world frame to the camera frame.</dd>
- <dd><code>R_base2gripper</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from robot base frame to the gripper frame.</dd>
- <dd><code>t_base2gripper</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from robot base frame to the gripper frame.</dd>
- <dd><code>R_base2world</code> - Estimated <code>(3x3)</code> rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).</dd>
- <dd><code>t_base2world</code> - Estimated <code>(3x1)</code> translation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).</dd>
- <dd><code>R_gripper2cam</code> - Estimated <code>(3x3)</code> rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).</dd>
- <dd><code>t_gripper2cam</code> - Estimated <code>(3x1)</code> translation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
- The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
- rotation then the translation (separable solutions):
- <ul>
- <li>
- M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
- </li>
- </ul>
- Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
- with the following implemented method:
- <ul>
- <li>
- A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
- </li>
- </ul>
- The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
- and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
- 
- The calibration procedure is the following:
- <ul>
- <li>
- a static calibration pattern is used to estimate the transformation between the target frame
- and the camera frame
- </li>
- <li>
- the robot gripper is moved in order to acquire several poses
- </li>
- <li>
- for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- instance the robot kinematics
- \(
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- \)
- </li>
- <li>
- for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
- for instance a pose estimation method (PnP) from 2D-3D point correspondences
- \(
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_w\\
- Y_w\\
- Z_w\\
- 1
- \end{bmatrix}
- \)
- </li>
- </ul>
- The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
- \(
- \begin{bmatrix}
- X_w\\
- Y_w\\
- Z_w\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- \)
- \(
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- \)
- This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
- <ul>
- <li>
- \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
- </li>
- <li>
- \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
- </li>
- <li>
- \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
- </li>
- <li>
- \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
- </li>
- </ul>
- <b>Note:</b>
- At least 3 measurements are required (the input vector sizes must be greater than or equal to 3).</dd>
- </dl>
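- <div class="block"><b>Example (usage sketch):</b> a minimal Java sketch, assuming the four input lists hold one
- rotation and one translation per pose; note the base-to-gripper direction of the robot transforms:</div>
- <pre>
- // imports assumed: org.opencv.core.Mat, org.opencv.calib3d.Calib3d, java.util.List
- // R_world2cam / t_world2cam:       from e.g. solvePnP on the calibration pattern for each pose
- // R_base2gripper / t_base2gripper: from the robot kinematics for each pose
- Mat R_base2world = new Mat(), t_base2world = new Mat();
- Mat R_gripper2cam = new Mat(), t_gripper2cam = new Mat();
- Calib3d.calibrateRobotWorldHandEye(R_world2cam, t_world2cam,
-         R_base2gripper, t_base2gripper,
-         R_base2world, t_base2world,
-         R_gripper2cam, t_gripper2cam);   // estimates w_T_b and c_T_g
- </pre>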
- </li>
- </ul>
- <a name="calibrateRobotWorldHandEye-java.util.List-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrateRobotWorldHandEye</h4>
- <pre>public static void calibrateRobotWorldHandEye(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_world2cam,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> R_base2gripper,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> t_base2gripper,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_base2world,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R_gripper2cam,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t_gripper2cam,
- int method)</pre>
- <div class="block">Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>R_world2cam</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from world frame to the camera frame.</dd>
- <dd><code>t_world2cam</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from world frame to the camera frame.</dd>
- <dd><code>R_base2gripper</code> - Rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the rotation, <code>(3x3)</code> rotation matrices or <code>(3x1)</code> rotation vectors,
- for all the transformations from robot base frame to the gripper frame.</dd>
- <dd><code>t_base2gripper</code> - Translation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
- This is a vector (<code>vector&lt;Mat&gt;</code>) that contains the <code>(3x1)</code> translation vectors for all the transformations
- from robot base frame to the gripper frame.</dd>
- <dd><code>R_base2world</code> - Estimated <code>(3x3)</code> rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).</dd>
- <dd><code>t_base2world</code> - Estimated <code>(3x1)</code> translation part extracted from the homogeneous matrix that transforms a point
- expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).</dd>
- <dd><code>R_gripper2cam</code> - Estimated <code>(3x3)</code> rotation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).</dd>
- <dd><code>t_gripper2cam</code> - Estimated <code>(3x1)</code> translation part extracted from the homogeneous matrix that transforms a point
- expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).</dd>
- <dd><code>method</code> - One of the implemented Robot-World/Hand-Eye calibration methods, see cv::RobotWorldHandEyeCalibrationMethod
- The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
- rotation then the translation (separable solutions):
- <ul>
- <li>
- M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
- </li>
- </ul>
- Another approach consists in estimating the rotation and the translation simultaneously (simultaneous solutions),
- with the following implemented method:
- <ul>
- <li>
- A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
- </li>
- </ul>
- The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
- and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
- 
- The calibration procedure is the following:
- <ul>
- <li>
- a static calibration pattern is used to estimate the transformation between the target frame
- and the camera frame
- </li>
- <li>
- the robot gripper is moved in order to acquire several poses
- </li>
- <li>
- for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- instance the robot kinematics
- \(
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- \)
- </li>
- <li>
- for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
- for instance a pose estimation method (PnP) from 2D-3D point correspondences
- \(
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_w\\
- Y_w\\
- Z_w\\
- 1
- \end{bmatrix}
- \)
- </li>
- </ul>
- The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
- \(
- \begin{bmatrix}
- X_w\\
- Y_w\\
- Z_w\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- \)
- \(
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- \)
- This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
- <ul>
- <li>
- \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
- </li>
- <li>
- \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
- </li>
- <li>
- \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
- </li>
- <li>
- \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
- </li>
- </ul>
- <b>Note:</b>
- At least 3 measurements are required (the size of the input vectors must be greater than or equal to 3).</dd>
- </dl>
- </li>
- </ul>
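- <div class="block"><b>Example.</b> A minimal calling sketch, illustrative only and not generated from the upstream sources: the per-pose rotations and translations below are placeholders standing in for real measurements, and the method constant is assumed to be exposed on <code>Calib3d</code> as <code>CALIB_ROBOT_WORLD_HAND_EYE_SHAH</code>.</div>
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- 
- public class RobotWorldHandEyeSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Per-pose inputs: world->camera (e.g. from PnP) and base->gripper (from robot kinematics).
-         List<Mat> rWorld2Cam = new ArrayList<>(), tWorld2Cam = new ArrayList<>();
-         List<Mat> rBase2Gripper = new ArrayList<>(), tBase2Gripper = new ArrayList<>();
-         for (int i = 0; i < 3; i++) {                        // at least 3 poses are required
-             rWorld2Cam.add(Mat.eye(3, 3, CvType.CV_64F));    // placeholder rotation matrices
-             tWorld2Cam.add(Mat.zeros(3, 1, CvType.CV_64F));  // placeholder translation vectors
-             rBase2Gripper.add(Mat.eye(3, 3, CvType.CV_64F));
-             tBase2Gripper.add(Mat.zeros(3, 1, CvType.CV_64F));
-         }
-         // Outputs: base->world and gripper->camera transformations.
-         Mat rBase2World = new Mat(), tBase2World = new Mat();
-         Mat rGripper2Cam = new Mat(), tGripper2Cam = new Mat();
-         Calib3d.calibrateRobotWorldHandEye(rWorld2Cam, tWorld2Cam, rBase2Gripper, tBase2Gripper,
-                 rBase2World, tBase2World, rGripper2Cam, tGripper2Cam,
-                 Calib3d.CALIB_ROBOT_WORLD_HAND_EYE_SHAH);
-     }
- }
- </pre>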
- <a name="calibrationMatrixValues-org.opencv.core.Mat-org.opencv.core.Size-double-double-double:A-double:A-double:A-org.opencv.core.Point-double:A-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>calibrationMatrixValues</h4>
- <pre>public static void calibrationMatrixValues(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double apertureWidth,
- double apertureHeight,
- double[] fovx,
- double[] fovy,
- double[] focalLength,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> principalPoint,
- double[] aspectRatio)</pre>
- <div class="block">Computes useful camera characteristics from the camera intrinsic matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix that can be estimated by #calibrateCamera or
- #stereoCalibrate .</dd>
- <dd><code>imageSize</code> - Input image size in pixels.</dd>
- <dd><code>apertureWidth</code> - Physical width in mm of the sensor.</dd>
- <dd><code>apertureHeight</code> - Physical height in mm of the sensor.</dd>
- <dd><code>fovx</code> - Output field of view in degrees along the horizontal sensor axis.</dd>
- <dd><code>fovy</code> - Output field of view in degrees along the vertical sensor axis.</dd>
- <dd><code>focalLength</code> - Output focal length of the lens in mm.</dd>
- <dd><code>principalPoint</code> - Output principal point in mm.</dd>
- <dd><code>aspectRatio</code> - \(f_y/f_x\)
- The function computes various useful camera characteristics from the previously estimated camera
- matrix.
- <b>Note:</b>
- Keep in mind that the unit 'mm' here stands for whatever unit of measure one chooses for
- the chessboard pitch (it can thus be any value).</dd>
- </dl>
- </li>
- </ul>
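- <div class="block"><b>Example.</b> An illustrative sketch of a call, not taken from the upstream reference; the intrinsics and the 6.4 x 4.8 sensor size are made-up numbers.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.Point;
- import org.opencv.core.Size;
- 
- public class CalibrationMatrixValuesSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Hypothetical intrinsics for a 640x480 image (fx = fy = 800 px, principal point at the center).
-         Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
-         cameraMatrix.put(0, 0, 800.0);
-         cameraMatrix.put(1, 1, 800.0);
-         cameraMatrix.put(0, 2, 320.0);
-         cameraMatrix.put(1, 2, 240.0);
-         double[] fovx = new double[1], fovy = new double[1];
-         double[] focalLength = new double[1], aspectRatio = new double[1];
-         Point principalPoint = new Point();
-         // Sensor size of 6.4 x 4.8 "mm" (any unit consistent with the chessboard pitch).
-         Calib3d.calibrationMatrixValues(cameraMatrix, new Size(640, 480), 6.4, 4.8,
-                 fovx, fovy, focalLength, principalPoint, aspectRatio);
-         System.out.println("fovx = " + fovx[0] + " deg, focal length = " + focalLength[0] + " mm");
-     }
- }
- </pre>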
- <a name="checkChessboard-org.opencv.core.Mat-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>checkChessboard</h4>
- <pre>public static boolean checkChessboard(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size)</pre>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
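- <div class="block"><b>Example.</b> An illustrative sketch, not upstream documentation, that composes a 90-degree rotation about Z plus a shift along X with a pure shift along Y.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- 
- public class ComposeRTSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // First transformation: 90 degrees about Z, shift of 1 along X.
-         Mat rvec1 = new Mat(3, 1, CvType.CV_64F), tvec1 = new Mat(3, 1, CvType.CV_64F);
-         rvec1.put(0, 0, 0.0, 0.0, Math.PI / 2);
-         tvec1.put(0, 0, 1.0, 0.0, 0.0);
-         // Second transformation: identity rotation, shift of 2 along Y.
-         Mat rvec2 = Mat.zeros(3, 1, CvType.CV_64F), tvec2 = new Mat(3, 1, CvType.CV_64F);
-         tvec2.put(0, 0, 0.0, 2.0, 0.0);
-         // Composed transformation: (rvec2, tvec2) applied after (rvec1, tvec1).
-         Mat rvec3 = new Mat(), tvec3 = new Mat();
-         Calib3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
-         System.out.println("rvec3 = " + rvec3.dump());
-         System.out.println("tvec3 = " + tvec3.dump());
-     }
- }
- </pre>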
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1</dd>
- <dd><code>dr3dr2</code> - Optional output derivative of rvec3 with regard to rvec2
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1</dd>
- <dd><code>dr3dr2</code> - Optional output derivative of rvec3 with regard to rvec2</dd>
- <dd><code>dr3dt2</code> - Optional output derivative of rvec3 with regard to tvec2
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1</dd>
- <dd><code>dr3dr2</code> - Optional output derivative of rvec3 with regard to rvec2</dd>
- <dd><code>dr3dt2</code> - Optional output derivative of rvec3 with regard to tvec2</dd>
- <dd><code>dt3dr1</code> - Optional output derivative of tvec3 with regard to rvec1
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt1)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1</dd>
- <dd><code>dr3dr2</code> - Optional output derivative of rvec3 with regard to rvec2</dd>
- <dd><code>dr3dt2</code> - Optional output derivative of rvec3 with regard to tvec2</dd>
- <dd><code>dt3dr1</code> - Optional output derivative of tvec3 with regard to rvec1</dd>
- <dd><code>dt3dt1</code> - Optional output derivative of tvec3 with regard to tvec1
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr2)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1</dd>
- <dd><code>dr3dr2</code> - Optional output derivative of rvec3 with regard to rvec2</dd>
- <dd><code>dr3dt2</code> - Optional output derivative of rvec3 with regard to tvec2</dd>
- <dd><code>dt3dr1</code> - Optional output derivative of tvec3 with regard to rvec1</dd>
- <dd><code>dt3dt1</code> - Optional output derivative of tvec3 with regard to tvec1</dd>
- <dd><code>dt3dr2</code> - Optional output derivative of tvec3 with regard to rvec2
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="composeRT-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>composeRT</h4>
- <pre>public static void composeRT(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dr3dt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dt3dt2)</pre>
- <div class="block">Combines two rotation-and-shift transformations.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rvec1</code> - First rotation vector.</dd>
- <dd><code>tvec1</code> - First translation vector.</dd>
- <dd><code>rvec2</code> - Second rotation vector.</dd>
- <dd><code>tvec2</code> - Second translation vector.</dd>
- <dd><code>rvec3</code> - Output rotation vector of the superposition.</dd>
- <dd><code>tvec3</code> - Output translation vector of the superposition.</dd>
- <dd><code>dr3dr1</code> - Optional output derivative of rvec3 with regard to rvec1</dd>
- <dd><code>dr3dt1</code> - Optional output derivative of rvec3 with regard to tvec1</dd>
- <dd><code>dr3dr2</code> - Optional output derivative of rvec3 with regard to rvec2</dd>
- <dd><code>dr3dt2</code> - Optional output derivative of rvec3 with regard to tvec2</dd>
- <dd><code>dt3dr1</code> - Optional output derivative of tvec3 with regard to rvec1</dd>
- <dd><code>dt3dt1</code> - Optional output derivative of tvec3 with regard to tvec1</dd>
- <dd><code>dt3dr2</code> - Optional output derivative of tvec3 with regard to rvec2</dd>
- <dd><code>dt3dt2</code> - Optional output derivative of tvec3 with regard to tvec2
- The functions compute:
- \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
- where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
- \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
- Also, the functions can compute the derivatives of the output vectors with regard to the input
- vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
- your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
- function that contains a matrix multiplication.</dd>
- </dl>
- </li>
- </ul>
- <a name="computeCorrespondEpilines-org.opencv.core.Mat-int-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>computeCorrespondEpilines</h4>
- <pre>public static void computeCorrespondEpilines(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points,
- int whichImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> lines)</pre>
- <div class="block">For points in an image of a stereo pair, computes the corresponding epilines in the other image.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points</code> - Input points. \(N \times 1\) or \(1 \times N\) matrix of type CV_32FC2 or
- <code>vector&lt;Point2f&gt;</code>.</dd>
- <dd><code>whichImage</code> - Index of the image (1 or 2) that contains the points.</dd>
- <dd><code>F</code> - Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify.</dd>
- <dd><code>lines</code> - Output vector of the epipolar lines corresponding to the points in the other image.
- Each line \(ax + by + c=0\) is encoded by 3 numbers \((a, b, c)\) .
- For every point in one of the two images of a stereo pair, the function finds the equation of the
- corresponding epipolar line in the other image.
- From the fundamental matrix definition (see #findFundamentalMat ), line \(l^{(2)}_i\) in the second
- image for the point \(p^{(1)}_i\) in the first image (when whichImage=1 ) is computed as:
- \(l^{(2)}_i = F p^{(1)}_i\)
- And vice versa, when whichImage=2, \(l^{(1)}_i\) is computed from \(p^{(2)}_i\) as:
- \(l^{(1)}_i = F^T p^{(2)}_i\)
- Line coefficients are defined up to a scale. They are normalized so that \(a_i^2+b_i^2=1\) .</dd>
- </dl>
- </li>
- </ul>
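- <div class="block"><b>Example.</b> A small illustrative sketch: the fundamental matrix below is the hand-built skew-symmetric matrix of a pure horizontal translation rather than one estimated with #findFundamentalMat, and the points are arbitrary.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.Point;
- 
- public class EpilinesSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Placeholder fundamental matrix of a pure horizontal translation: F = [t]_x with t = (1, 0, 0).
-         Mat F = Mat.zeros(3, 3, CvType.CV_64F);
-         F.put(1, 2, -1.0);
-         F.put(2, 1, 1.0);
-         // Points detected in the first image.
-         MatOfPoint2f points1 = new MatOfPoint2f(new Point(100, 120), new Point(250, 80));
-         Mat lines2 = new Mat();
-         // For each point of image 1, compute the epiline a*x + b*y + c = 0 in image 2.
-         Calib3d.computeCorrespondEpilines(points1, 1, F, lines2);
-         System.out.println(lines2.dump());   // one (a, b, c) triple per input point
-     }
- }
- </pre>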
- <a name="convertPointsFromHomogeneous-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>convertPointsFromHomogeneous</h4>
- <pre>public static void convertPointsFromHomogeneous(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</pre>
- <div class="block">Converts points from homogeneous to Euclidean space.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Input vector of N-dimensional points.</dd>
- <dd><code>dst</code> - Output vector of N-1-dimensional points.
- The function converts points from homogeneous to Euclidean space using perspective projection. That is,
- each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
- output point coordinates will be (0,0,0,...).</dd>
- </dl>
- </li>
- </ul>
- <a name="convertPointsToHomogeneous-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>convertPointsToHomogeneous</h4>
- <pre>public static void convertPointsToHomogeneous(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</pre>
- <div class="block">Converts points from Euclidean to homogeneous space.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Input vector of N-dimensional points.</dd>
- <dd><code>dst</code> - Output vector of N+1-dimensional points.
- The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
- point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).</dd>
- </dl>
- </li>
- </ul>
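- <div class="block"><b>Example.</b> A round-trip sketch covering this function together with #convertPointsFromHomogeneous above (illustrative only; the point values are arbitrary).</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.Point;
- 
- public class HomogeneousConversionSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Two 2D points (x, y).
-         MatOfPoint2f euclidean = new MatOfPoint2f(new Point(1, 2), new Point(3, 4));
-         // Append 1: (x, y) -> (x, y, 1).
-         Mat homogeneous = new Mat();
-         Calib3d.convertPointsToHomogeneous(euclidean, homogeneous);
-         // Perspective divide: (x, y, w) -> (x/w, y/w); recovers the original points here.
-         Mat back = new Mat();
-         Calib3d.convertPointsFromHomogeneous(homogeneous, back);
-         System.out.println(homogeneous.dump());
-         System.out.println(back.dump());
-     }
- }
- </pre>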
- <a name="correctMatches-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>correctMatches</h4>
- <pre>public static void correctMatches(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newPoints1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newPoints2)</pre>
- <div class="block">Refines coordinates of corresponding points.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>F</code> - 3x3 fundamental matrix.</dd>
- <dd><code>points1</code> - 1xN array containing the first set of points.</dd>
- <dd><code>points2</code> - 1xN array containing the second set of points.</dd>
- <dd><code>newPoints1</code> - The optimized points1.</dd>
- <dd><code>newPoints2</code> - The optimized points2.
- The function implements the Optimal Triangulation Method (see Multiple View Geometry CITE: HartleyZ00 for details).
- For each given point correspondence points1[i] <-> points2[i], and a fundamental matrix F, it
- computes the corrected correspondences newPoints1[i] <-> newPoints2[i] that minimize the geometric
- error \(d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\) (where \(d(a,b)\) is the
- geometric distance between points \(a\) and \(b\) ) subject to the epipolar constraint
- \(newPoints2^T \cdot F \cdot newPoints1 = 0\) .</dd>
- </dl>
- </li>
- </ul>
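- <div class="block"><b>Example.</b> An illustrative sketch, not upstream documentation: F is a placeholder rank-2 fundamental matrix of a pure horizontal translation, and the correspondences are 1xN two-channel arrays containing a little synthetic noise.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- 
- public class CorrectMatchesSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Placeholder fundamental matrix (normally estimated with findFundamentalMat).
-         Mat F = Mat.zeros(3, 3, CvType.CV_64F);
-         F.put(1, 2, -1.0);
-         F.put(2, 1, 1.0);
-         // 1xN two-channel arrays of (x, y) correspondences.
-         Mat points1 = new Mat(1, 2, CvType.CV_64FC2);
-         Mat points2 = new Mat(1, 2, CvType.CV_64FC2);
-         points1.put(0, 0, 100.0, 50.0, 200.0, 80.0);
-         points2.put(0, 0, 110.0, 52.0, 215.0, 79.0);
-         Mat newPoints1 = new Mat(), newPoints2 = new Mat();
-         // Minimally move each pair so that it satisfies the epipolar constraint exactly.
-         Calib3d.correctMatches(F, points1, points2, newPoints1, newPoints2);
-         System.out.println(newPoints1.dump());
-         System.out.println(newPoints2.dump());
-     }
- }
- </pre>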
- <a name="decomposeEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeEssentialMat</h4>
- <pre>public static void decomposeEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</pre>
- <div class="block">Decompose an essential matrix to possible rotations and translation.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>R1</code> - One possible rotation matrix.</dd>
- <dd><code>R2</code> - Another possible rotation matrix.</dd>
- <dd><code>t</code> - One possible translation.
- This function decomposes the essential matrix E using SVD decomposition CITE: HartleyZ00. In
- general, four possible poses exist for the decomposition of E. They are \([R_1, t]\),
- \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\).
- If E gives the epipolar constraint \([p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\) between the image
- points \(p_1\) in the first image and \(p_2\) in the second image, then any of the tuples
- \([R_1, t]\), \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\) is a change of basis from the first
- camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
- can only get the direction of the translation. For this reason, the translation t is returned with
- unit length.</dd>
- </dl>
- </li>
- </ul>
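- <div class="block"><b>Example.</b> A short illustrative sketch: the essential matrix of a pure translation along X is built by hand instead of coming from #findEssentialMat.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- 
- public class DecomposeEssentialSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Essential matrix of a pure translation along X: E = [t]_x with t = (1, 0, 0).
-         Mat E = Mat.zeros(3, 3, CvType.CV_64F);
-         E.put(1, 2, -1.0);
-         E.put(2, 1, 1.0);
-         Mat R1 = new Mat(), R2 = new Mat(), t = new Mat();
-         Calib3d.decomposeEssentialMat(E, R1, R2, t);
-         // Four candidate poses follow: [R1, t], [R1, -t], [R2, t], [R2, -t]; t has unit length.
-         System.out.println("R1 = " + R1.dump());
-         System.out.println("R2 = " + R2.dump());
-         System.out.println("t  = " + t.dump());
-     }
- }
- </pre>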
- <a name="decomposeHomographyMat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-java.util.List-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeHomographyMat</h4>
- <pre>public static int decomposeHomographyMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rotations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> translations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> normals)</pre>
- <div class="block">Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>H</code> - The input homography matrix between two images.</dd>
- <dd><code>K</code> - The input camera intrinsic matrix.</dd>
- <dd><code>rotations</code> - Array of rotation matrices.</dd>
- <dd><code>translations</code> - Array of translation matrices.</dd>
- <dd><code>normals</code> - Array of plane normal matrices.
- This function extracts relative camera motion between two views of a planar object and returns up to
- four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
- the homography matrix H is described in detail in CITE: Malis2007.
- If the homography H, induced by the plane, gives the constraint
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\) on the source image points
- \(p_i\) and the destination image points \(p'_i\), then the tuple of rotations[k] and
- translations[k] is a change of basis from the source camera's coordinate system to the destination
- camera's coordinate system. However, by decomposing H, one can only get the translation normalized
- by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
- If point correspondences are available, at least two solutions may further be invalidated by
- applying the positive depth constraint, i.e. all points must be in front of the camera.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>the number of computed solutions (the size of the output lists)</dd>
- </dl>
- </li>
- </ul>
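- <div class="block"><b>Example.</b> An illustrative sketch, not upstream documentation; the homography and intrinsics are placeholder values rather than estimates from #findHomography and #calibrateCamera.</div>
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- 
- public class DecomposeHomographySketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Homography between two views of a plane (placeholder: a small image-space shift).
-         Mat H = Mat.eye(3, 3, CvType.CV_64F);
-         H.put(0, 2, 10.0);
-         // Camera intrinsic matrix used when the homography was estimated.
-         Mat K = Mat.eye(3, 3, CvType.CV_64F);
-         K.put(0, 0, 800.0); K.put(1, 1, 800.0);
-         K.put(0, 2, 320.0); K.put(1, 2, 240.0);
-         List<Mat> rotations = new ArrayList<>();
-         List<Mat> translations = new ArrayList<>();
-         List<Mat> normals = new ArrayList<>();
-         // Up to four candidate (R, t, n) tuples are returned.
-         int solutions = Calib3d.decomposeHomographyMat(H, K, rotations, translations, normals);
-         System.out.println(solutions + " candidate decompositions");
-     }
- }
- </pre>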
- <a name="decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeProjectionMatrix</h4>
- <pre>public static void decomposeProjectionMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect)</pre>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>projMatrix</code> - 3x4 input projection matrix P.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).</dd>
- <dd><code>rotMatrix</code> - Output 3x3 external rotation matrix R.</dd>
- <dd><code>transVect</code> - Output 4x1 translation vector T.
- The function computes a decomposition of a projection matrix into a calibration and a rotation
- matrix and the position of a camera.
- It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
- three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
- The function is based on #RQDecomp3x3 .</dd>
- </dl>
- </li>
- </ul>
- <a name="decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeProjectionMatrix</h4>
- <pre>public static void decomposeProjectionMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX)</pre>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>projMatrix</code> - 3x4 input projection matrix P.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).</dd>
- <dd><code>rotMatrix</code> - Output 3x3 external rotation matrix R.</dd>
- <dd><code>transVect</code> - Output 4x1 translation vector T.</dd>
- <dd><code>rotMatrixX</code> - Optional 3x3 rotation matrix around x-axis.
- The function computes a decomposition of a projection matrix into a calibration and a rotation
- matrix and the position of a camera.
- It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
- three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
- The function is based on #RQDecomp3x3 .</dd>
- </dl>
- </li>
- </ul>
- <a name="decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeProjectionMatrix</h4>
- <pre>public static void decomposeProjectionMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixY)</pre>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>projMatrix</code> - 3x4 input projection matrix P.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).</dd>
- <dd><code>rotMatrix</code> - Output 3x3 external rotation matrix R.</dd>
- <dd><code>transVect</code> - Output 4x1 translation vector T.</dd>
- <dd><code>rotMatrixX</code> - Optional 3x3 rotation matrix around x-axis.</dd>
- <dd><code>rotMatrixY</code> - Optional 3x3 rotation matrix around y-axis.
- The function computes a decomposition of a projection matrix into a calibration and a rotation
- matrix and the position of a camera.
- It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
- three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
- The function is based on #RQDecomp3x3 .</dd>
- </dl>
- </li>
- </ul>
- <a name="decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeProjectionMatrix</h4>
- <pre>public static void decomposeProjectionMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixY,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixZ)</pre>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>projMatrix</code> - 3x4 input projection matrix P.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).</dd>
- <dd><code>rotMatrix</code> - Output 3x3 external rotation matrix R.</dd>
- <dd><code>transVect</code> - Output 4x1 translation vector T.</dd>
- <dd><code>rotMatrixX</code> - Optional 3x3 rotation matrix around x-axis.</dd>
- <dd><code>rotMatrixY</code> - Optional 3x3 rotation matrix around y-axis.</dd>
- <dd><code>rotMatrixZ</code> - Optional 3x3 rotation matrix around z-axis.
- The function computes a decomposition of a projection matrix into a calibration and a rotation
- matrix and the position of a camera.
- It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
- three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
- The function is based on #RQDecomp3x3 .</dd>
- </dl>
- </li>
- </ul>
- <a name="decomposeProjectionMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>decomposeProjectionMatrix</h4>
- <pre>public static void decomposeProjectionMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> transVect,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixX,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixY,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rotMatrixZ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> eulerAngles)</pre>
- <div class="block">Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>projMatrix</code> - 3x4 input projection matrix P.</dd>
- <dd><code>cameraMatrix</code> - Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).</dd>
- <dd><code>rotMatrix</code> - Output 3x3 external rotation matrix R.</dd>
- <dd><code>transVect</code> - Output 4x1 translation vector T.</dd>
- <dd><code>rotMatrixX</code> - Optional 3x3 rotation matrix around x-axis.</dd>
- <dd><code>rotMatrixY</code> - Optional 3x3 rotation matrix around y-axis.</dd>
- <dd><code>rotMatrixZ</code> - Optional 3x3 rotation matrix around z-axis.</dd>
- <dd><code>eulerAngles</code> - Optional three-element vector containing three Euler angles of rotation in
- degrees.
- The function computes a decomposition of a projection matrix into a calibration and a rotation
- matrix and the position of a camera.
- It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
- be used in OpenGL. Note, there is always more than one sequence of rotations about the three
- principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
- three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
- The function is based on #RQDecomp3x3 .</dd>
- </dl>
- </li>
- </ul>
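- <div class="block"><b>Example.</b> A sketch of this full overload (illustrative only); the projection matrix below corresponds to a hypothetical camera with identity rotation placed at X = 1.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- 
- public class DecomposeProjectionSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // 3x4 projection matrix P = K [I | -C] for a camera at C = (1, 0, 0) (placeholder values).
-         Mat P = Mat.zeros(3, 4, CvType.CV_64F);
-         P.put(0, 0, 800.0, 0.0, 320.0, -800.0);
-         P.put(1, 0, 0.0, 800.0, 240.0, 0.0);
-         P.put(2, 0, 0.0, 0.0, 1.0, 0.0);
-         Mat cameraMatrix = new Mat(), rotMatrix = new Mat(), transVect = new Mat();
-         Mat rotX = new Mat(), rotY = new Mat(), rotZ = new Mat(), eulerAngles = new Mat();
-         Calib3d.decomposeProjectionMatrix(P, cameraMatrix, rotMatrix, transVect,
-                 rotX, rotY, rotZ, eulerAngles);
-         System.out.println("K = " + cameraMatrix.dump());
-         System.out.println("R = " + rotMatrix.dump());
-         System.out.println("camera position (homogeneous) = " + transVect.dump());
-     }
- }
- </pre>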
- <a name="drawChessboardCorners-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.MatOfPoint2f-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>drawChessboardCorners</h4>
- <pre>public static void drawChessboardCorners(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> corners,
- boolean patternWasFound)</pre>
- <div class="block">Renders the detected chessboard corners.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Destination image. It must be an 8-bit color image.</dd>
- <dd><code>patternSize</code> - Number of inner corners per chessboard row and column
- (patternSize = cv::Size(points_per_row, points_per_column)).</dd>
- <dd><code>corners</code> - Array of detected corners, the output of #findChessboardCorners.</dd>
- <dd><code>patternWasFound</code> - Parameter indicating whether the complete board was found or not. The
- return value of #findChessboardCorners should be passed here.
- The function draws individual chessboard corners detected either as red circles if the board was not
- found, or as colored corners connected with lines if the board was found.</dd>
- </dl>
- </li>
- </ul>
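- <div class="block"><b>Example.</b> An illustrative sketch combining #findChessboardCorners with this function; <code>board.png</code> and the 9x6 pattern size are assumptions, not values from the reference.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.Size;
- import org.opencv.imgcodecs.Imgcodecs;
- 
- public class ChessboardCornersSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Hypothetical 8-bit color image of a chessboard with 9x6 inner corners.
-         Mat image = Imgcodecs.imread("board.png");
-         Size patternSize = new Size(9, 6);
-         MatOfPoint2f corners = new MatOfPoint2f();
-         boolean found = Calib3d.findChessboardCorners(image, patternSize, corners);
-         // Red circles when the board was not fully found, colored connected corners otherwise.
-         Calib3d.drawChessboardCorners(image, patternSize, corners, found);
-         Imgcodecs.imwrite("board_corners.png", image);
-     }
- }
- </pre>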
- <a name="drawFrameAxes-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-float-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>drawFrameAxes</h4>
- <pre>public static void drawFrameAxes(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- float length)</pre>
- <div class="block">Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Input/output image. It must have 1 or 3 channels. The number of channels is not altered.</dd>
- <dd><code>cameraMatrix</code> - Input 3x3 floating-point matrix of camera intrinsic parameters.
- \(\cameramatrix{A}\)</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>rvec</code> - Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Translation vector.</dd>
- <dd><code>length</code> - Length of the painted axes in the same unit as tvec (usually in meters).
- This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
- OX is drawn in red, OY in green and OZ in blue.</dd>
- </dl>
- </li>
- </ul>
- <a name="drawFrameAxes-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-float-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>drawFrameAxes</h4>
- <pre>public static void drawFrameAxes(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- float length,
- int thickness)</pre>
- <div class="block">Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Input/output image. It must have 1 or 3 channels. The number of channels is not altered.</dd>
- <dd><code>cameraMatrix</code> - Input 3x3 floating-point matrix of camera intrinsic parameters.
- \(\cameramatrix{A}\)</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>rvec</code> - Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Translation vector.</dd>
- <dd><code>length</code> - Length of the painted axes in the same unit as tvec (usually in meters).</dd>
- <dd><code>thickness</code> - Line thickness of the painted axes.
- This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
- OX is drawn in red, OY in green and OZ in blue.</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul>
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
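- <div class="block">A minimal sketch (the matched point pairs below are illustrative assumptions; in practice they
- come from feature matching): estimate a 2x3 affine matrix from two point sets.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class Affine2DDemo {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         MatOfPoint2f from = new MatOfPoint2f(
-                 new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
-         MatOfPoint2f to = new MatOfPoint2f(
-                 new Point(10, 20), new Point(110, 25), new Point(105, 125), new Point(5, 120));
-         // Returns [a11 a12 b1; a21 a22 b2], or an empty Mat if estimation failed.
-         Mat A = Calib3d.estimateAffine2D(from, to);
-         if (!A.empty()) {
-             System.out.println(A.dump());
-         }
-     }
- }
- </pre>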
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul>
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul>
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1, for the estimated transformation. Anything
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-long-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence,
- long refineIters)</pre>
- <div class="block">Computes an optimal affine transformation between two 2D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12}\\
- a_{21} & a_{22}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set containing \((X,Y)\).</dd>
- <dd><code>to</code> - Second input 2D point set containing \((x,y)\).</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1, for the estimated transformation. Anything
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</dd>
- <dd><code>refineIters</code> - Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
- could not be estimated. The returned matrix has the following form:
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & b_1\\
- a_{21} & a_{22} & b_2\\
- \end{bmatrix}
- \)
- The function estimates an optimal 2D affine transformation between two 2D point sets using the
- selected robust algorithm.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffinePartial2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
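- <div class="block">A sketch of the fully parameterized overload (the threshold and iteration counts below are
- illustrative values, not recommendations); the inlier mask shows which correspondences survived RANSAC.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class Affine2DRansacDemo {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Four pairs related by a pure (10, 20) translation plus one gross outlier.
-         MatOfPoint2f from = new MatOfPoint2f(
-                 new Point(0, 0), new Point(100, 0), new Point(100, 100),
-                 new Point(0, 100), new Point(50, 50));
-         MatOfPoint2f to = new MatOfPoint2f(
-                 new Point(10, 20), new Point(110, 20), new Point(110, 120),
-                 new Point(10, 120), new Point(400, 400)); // outlier
-         Mat inliers = new Mat();
-         Mat A = Calib3d.estimateAffine2D(from, to, inliers,
-                 Calib3d.RANSAC, // robust method
-                 3.0,            // ransacReprojThreshold, in pixels
-                 2000L,          // maxIters
-                 0.99,           // confidence
-                 10L);           // refineIters (0 disables Levenberg-Marquardt refinement)
-         // inliers is an Nx1 mask: 1 = inlier, 0 = outlier.
-         System.out.println(A.dump());
-         System.out.println(inliers.dump());
-     }
- }
- </pre>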
- <a name="estimateAffine2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pts1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pts2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</pre>
- </li>
- </ul>
- <a name="estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine3D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</pre>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.
- It computes \(R, s, t\) minimizing \(\sum_{i}{\|dst_i - (s \cdot R \cdot src_i + t)\|^2}\),
- where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
- scalar scale value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least.
- The estimated affine transform has a uniform (homogeneous) scale, making it a similarity
- transform, a subclass of affine transformations with 7 degrees of freedom. The paired point
- sets need to comprise at least 3 points each.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set.</dd>
- <dd><code>dst</code> - Second input 3D point set.
- Else the pointed-to variable will be set to the optimal scale.
- This might be unwanted, e.g. when optimizing a transform between a right- and a
- left-handed coordinate system.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>3D affine transformation matrix \(3 \times 4\) of the form
- \(T =
- \begin{bmatrix}
- R & t\\
- \end{bmatrix}
- \)</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-double:A-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine3D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- double[] scale)</pre>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.
- It computes \(R, s, t\) minimizing \(\sum_{i}{\|dst_i - (s \cdot R \cdot src_i + t)\|^2}\),
- where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
- scalar scale value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least.
- The estimated affine transform has a uniform (homogeneous) scale, making it a similarity
- transform, a subclass of affine transformations with 7 degrees of freedom. The paired point
- sets need to comprise at least 3 points each.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set.</dd>
- <dd><code>dst</code> - Second input 3D point set.</dd>
- <dd><code>scale</code> - If null is passed, the scale parameter s will be assumed to be 1.0.
- Else the pointed-to variable will be set to the optimal scale.
- This might be unwanted, e.g. when optimizing a transform between a right- and a
- left-handed coordinate system.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>3D affine transformation matrix \(3 \times 4\) of the form
- \(T =
- \begin{bmatrix}
- R & t\\
- \end{bmatrix}
- \)</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-double:A-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine3D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffine3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- double[] scale,
- boolean force_rotation)</pre>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.
- It computes \(R, s, t\) minimizing \(\sum_{i}{\|dst_i - (s \cdot R \cdot src_i + t)\|^2}\),
- where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
- scalar scale value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least.
- The estimated affine transform has a uniform (homogeneous) scale, making it a similarity
- transform, a subclass of affine transformations with 7 degrees of freedom. The paired point
- sets need to comprise at least 3 points each.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set.</dd>
- <dd><code>dst</code> - Second input 3D point set.</dd>
- <dd><code>scale</code> - If null is passed, the scale parameter s will be assumed to be 1.0.
- Else the pointed-to variable will be set to the optimal scale.</dd>
- <dd><code>force_rotation</code> - If true, the returned rotation will never be a reflection.
- This might be unwanted, e.g. when optimizing a transform between a right- and a
- left-handed coordinate system.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>3D affine transformation matrix \(3 \times 4\) of the form
- \(T =
- \begin{bmatrix}
- R & t\\
- \end{bmatrix}
- \)</dd>
- </dl>
- </li>
- </ul>
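- <div class="block">A minimal sketch of the Umeyama-based overload (the point sets below are illustrative
- assumptions, with dst equal to 2 * src + (1, 2, 3)): recover the rotation, translation and the uniform
- scale separately.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class Umeyama3DDemo {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         MatOfPoint3f src = new MatOfPoint3f(
-                 new Point3(0, 0, 0), new Point3(1, 0, 0),
-                 new Point3(0, 1, 0), new Point3(0, 0, 1));
-         MatOfPoint3f dst = new MatOfPoint3f(
-                 new Point3(1, 2, 3), new Point3(3, 2, 3),
-                 new Point3(1, 4, 3), new Point3(1, 2, 5));
-         double[] scale = new double[1];
-         // force_rotation = true keeps R a proper rotation (no reflection).
-         Mat T = Calib3d.estimateAffine3D(src, dst, scale, true);
-         System.out.println("scale = " + scale[0]); // expected to be close to 2.0
-         System.out.println(T.dump());              // 3x4 matrix of the form [R | t]
-     }
- }
- </pre>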
- <a name="estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine3D</h4>
- <pre>public static int estimateAffine3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</pre>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- z\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12} & a_{13}\\
- a_{21} & a_{22} & a_{23}\\
- a_{31} & a_{32} & a_{33}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- Z\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- b_3\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set containing \((X,Y,Z)\).</dd>
- <dd><code>dst</code> - Second input 3D point set containing \((x,y,z)\).</dd>
- <dd><code>out</code> - Output 3D affine transformation matrix \(3 \times 4\) of the form
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & a_{13} & b_1\\
- a_{21} & a_{22} & a_{23} & b_2\\
- a_{31} & a_{32} & a_{33} & b_3\\
- \end{bmatrix}
- \)</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).
- an inlier.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- The function estimates an optimal 3D affine transformation between two 3D point sets using the
- RANSAC algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine3D</h4>
- <pre>public static int estimateAffine3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold)</pre>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- z\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12} & a_{13}\\
- a_{21} & a_{22} & a_{23}\\
- a_{31} & a_{32} & a_{33}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- Z\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- b_3\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set containing \((X,Y,Z)\).</dd>
- <dd><code>dst</code> - Second input 3D point set containing \((x,y,z)\).</dd>
- <dd><code>out</code> - Output 3D affine transformation matrix \(3 \times 4\) of the form
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & a_{13} & b_1\\
- a_{21} & a_{22} & a_{23} & b_2\\
- a_{31} & a_{32} & a_{33} & b_3\\
- \end{bmatrix}
- \)</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>ransacThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider a point as
- an inlier.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- The function estimates an optimal 3D affine transformation between two 3D point sets using the
- RANSAC algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffine3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffine3D</h4>
- <pre>public static int estimateAffine3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold,
- double confidence)</pre>
- <div class="block">Computes an optimal affine transformation between two 3D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- z\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- a_{11} & a_{12} & a_{13}\\
- a_{21} & a_{22} & a_{23}\\
- a_{31} & a_{32} & a_{33}\\
- \end{bmatrix}
- \begin{bmatrix}
- X\\
- Y\\
- Z\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- b_3\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set containing \((X,Y,Z)\).</dd>
- <dd><code>dst</code> - Second input 3D point set containing \((x,y,z)\).</dd>
- <dd><code>out</code> - Output 3D affine transformation matrix \(3 \times 4\) of the form
- \(
- \begin{bmatrix}
- a_{11} & a_{12} & a_{13} & b_1\\
- a_{21} & a_{22} & a_{23} & b_2\\
- a_{31} & a_{32} & a_{33} & b_3\\
- \end{bmatrix}
- \)</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>ransacThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider a point as
- an inlier.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1, for the estimated transformation. Anything
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- The function estimates an optimal 3D affine transformation between two 3D point sets using the
- RANSAC algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
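- <div class="block">A minimal sketch of the RANSAC-based overload (the point sets and threshold below are
- illustrative assumptions): the 3x4 affine matrix is written to <code>out</code> and the inlier mask
- to <code>inliers</code>.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class Affine3DRansacDemo {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // dst = src + (1, 0, 0) except for one gross outlier.
-         MatOfPoint3f src = new MatOfPoint3f(
-                 new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0),
-                 new Point3(0, 0, 1), new Point3(1, 1, 1));
-         MatOfPoint3f dst = new MatOfPoint3f(
-                 new Point3(1, 0, 0), new Point3(2, 0, 0), new Point3(1, 1, 0),
-                 new Point3(1, 0, 1), new Point3(9, 9, 9)); // outlier
-         Mat out = new Mat();      // receives the 3x4 affine matrix
-         Mat inliers = new Mat();  // receives the inlier mask (1 = inlier, 0 = outlier)
-         int ok = Calib3d.estimateAffine3D(src, dst, out, inliers, 0.1, 0.99);
-         if (ok != 0) {
-             System.out.println(out.dump());
-             System.out.println(inliers.dump());
-         }
-     }
- }
- </pre>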
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul>
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
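- <div class="block">A minimal sketch (the point pairs below are illustrative assumptions, related by scale 2,
- a 90-degree rotation and a (5, 7) shift): the returned 2x3 matrix encodes exactly those 4 degrees of
- freedom.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
-
- public class PartialAffineDemo {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         MatOfPoint2f from = new MatOfPoint2f(
-                 new Point(0, 0), new Point(1, 0), new Point(0, 1), new Point(1, 1));
-         MatOfPoint2f to = new MatOfPoint2f(
-                 new Point(5, 7), new Point(5, 9), new Point(3, 7), new Point(3, 9));
-         // 2x3 matrix of the form [ s*cos(theta)  -s*sin(theta)  t_x ; s*sin(theta)  s*cos(theta)  t_y ]
-         Mat M = Calib3d.estimateAffinePartial2D(from, to);
-         System.out.println(M.dump());
-     }
- }
- </pre>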
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers.
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul>
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers.</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul>
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers.</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers.</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers.</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1, for the estimated transformation. Anything
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateAffinePartial2D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-long-double-long-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateAffinePartial2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> estimateAffinePartial2D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> from,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> to,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int method,
- double ransacReprojThreshold,
- long maxIters,
- double confidence,
- long refineIters)</pre>
- <div class="block">Computes an optimal limited affine transformation with 4 degrees of freedom between
- two 2D point sets.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>from</code> - First input 2D point set.</dd>
- <dd><code>to</code> - Second input 2D point set.</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers.</dd>
- <dd><code>method</code> - Robust method used to compute transformation. The following methods are possible:
- <ul>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- RANSAC is the default method.
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider
- a point as an inlier. Applies only to RANSAC.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1, for the estimated transformation. Anything
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</dd>
- <dd><code>refineIters</code> - Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
- Passing 0 will disable refining, so the output matrix will be output of robust method.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
- empty matrix if transformation could not be estimated.
- The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
- combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
- estimation.
- The computed transformation is then refined further (using only inliers) with the
- Levenberg-Marquardt method to reduce the re-projection error even more.
- Estimated transformation matrix is:
- \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
- \end{bmatrix} \)
- Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
- translations in \( x, y \) axes respectively.
- <b>Note:</b>
- The RANSAC method can handle practically any ratio of outliers but needs a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers.
- SEE: estimateAffine2D, getAffineTransform</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateChessboardSharpness</h4>
- <pre>public static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a> estimateChessboardSharpness(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners)</pre>
- <div class="block">Estimates the sharpness of a detected chessboard.
- Image sharpness, as well as brightness, is a critical parameter for accurate
- camera calibration. To access these parameters and filter out
- problematic calibration images, this method calculates edge profiles by traveling from
- black to white chessboard cell centers. From these profiles, the number of pixels
- required to transition from black to white is calculated. The width of this
- transition area is a good indication of how sharply the chessboard is imaged
- and should be below ~3.0 pixels.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Gray image used to find chessboard corners</dd>
- <dd><code>patternSize</code> - Size of a found chessboard pattern</dd>
- <dd><code>corners</code> - Corners found by #findChessboardCornersSB
- The optional sharpness array is of type CV_32FC1 and has for each calculated
- profile one row with the following five entries:
- 0 = x coordinate of the underlying edge in the image
- 1 = y coordinate of the underlying edge in the image
- 2 = width of the transition area (sharpness)
- 3 = signal strength in the black cell (min brightness)
- 4 = signal strength in the white cell (max brightness)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Scalar(average sharpness, average min brightness, average max brightness,0)</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-float-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateChessboardSharpness</h4>
- <pre>public static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a> estimateChessboardSharpness(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- float rise_distance)</pre>
- <div class="block">Estimates the sharpness of a detected chessboard.
- Image sharpness, as well as brightness, is a critical parameter for accurate
- camera calibration. To access these parameters for filtering out
- problematic calibration images, this method calculates edge profiles by traveling from
- black to white chessboard cell centers. Based on this, the number of pixels required to
- transition from black to white is calculated. This width of the
- transition area is a good indication of how sharply the chessboard is imaged
- and should be below ~3.0 pixels.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Gray image used to find chessboard corners</dd>
- <dd><code>patternSize</code> - Size of a found chessboard pattern</dd>
- <dd><code>corners</code> - Corners found by #findChessboardCornersSB</dd>
- <dd><code>rise_distance</code> - Rise distance 0.8 means 10% ... 90% of the final signal strength
- The optional sharpness array is of type CV_32FC1 and has for each calculated
- profile one row with the following five entries:
- 0 = x coordinate of the underlying edge in the image
- 1 = y coordinate of the underlying edge in the image
- 2 = width of the transition area (sharpness)
- 3 = signal strength in the black cell (min brightness)
- 4 = signal strength in the white cell (max brightness)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Scalar(average sharpness, average min brightness, average max brightness,0)</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-float-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateChessboardSharpness</h4>
- <pre>public static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a> estimateChessboardSharpness(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- float rise_distance,
- boolean vertical)</pre>
- <div class="block">Estimates the sharpness of a detected chessboard.
- Image sharpness, as well as brightness, is a critical parameter for accurate
- camera calibration. To access these parameters for filtering out
- problematic calibration images, this method calculates edge profiles by traveling from
- black to white chessboard cell centers. Based on this, the number of pixels required to
- transition from black to white is calculated. This width of the
- transition area is a good indication of how sharply the chessboard is imaged
- and should be below ~3.0 pixels.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Gray image used to find chessboard corners</dd>
- <dd><code>patternSize</code> - Size of a found chessboard pattern</dd>
- <dd><code>corners</code> - Corners found by #findChessboardCornersSB</dd>
- <dd><code>rise_distance</code> - Rise distance 0.8 means 10% ... 90% of the final signal strength</dd>
- <dd><code>vertical</code> - By default edge responses for horizontal lines are calculated
- The optional sharpness array is of type CV_32FC1 and has for each calculated
- profile one row with the following five entries:
- 0 = x coordinate of the underlying edge in the image
- 1 = y coordinate of the underlying edge in the image
- 2 = width of the transition area (sharpness)
- 3 = signal strength in the black cell (min brightness)
- 4 = signal strength in the white cell (max brightness)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Scalar(average sharpness, average min brightness, average max brightness,0)</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateChessboardSharpness-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-float-boolean-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateChessboardSharpness</h4>
- <pre>public static <a href="../../../org/opencv/core/Scalar.html" title="class in org.opencv.core">Scalar</a> estimateChessboardSharpness(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- float rise_distance,
- boolean vertical,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> sharpness)</pre>
- <div class="block">Estimates the sharpness of a detected chessboard.
- Image sharpness, as well as brightness, is a critical parameter for accurate
- camera calibration. To access these parameters for filtering out
- problematic calibration images, this method calculates edge profiles by traveling from
- black to white chessboard cell centers. Based on this, the number of pixels required to
- transition from black to white is calculated. This width of the
- transition area is a good indication of how sharply the chessboard is imaged
- and should be below ~3.0 pixels.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Gray image used to find chessboard corners</dd>
- <dd><code>patternSize</code> - Size of a found chessboard pattern</dd>
- <dd><code>corners</code> - Corners found by #findChessboardCornersSB</dd>
- <dd><code>rise_distance</code> - Rise distance 0.8 means 10% ... 90% of the final signal strength</dd>
- <dd><code>vertical</code> - By default edge responses for horizontal lines are calculated</dd>
- <dd><code>sharpness</code> - Optional output array with a sharpness value for calculated edge responses (see description)
- The optional sharpness array is of type CV_32FC1 and has for each calculated
- profile one row with the following five entries:
- 0 = x coordinate of the underlying edge in the image
- 1 = y coordinate of the underlying edge in the image
- 2 = width of the transition area (sharpness)
- 3 = signal strength in the black cell (min brightness)
- 4 = signal strength in the white cell (max brightness)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>Scalar(average sharpness, average min brightness, average max brightness,0)</dd>
- </dl>
- </li>
- </ul>
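- As a rough illustration, the following Java sketch detects the board with the sector-based
- detector and then checks its sharpness; the file name and pattern size are placeholders, and the
- OpenCV native library is assumed to be loaded:
- <code>
- Mat gray = Imgcodecs.imread("board.png", Imgcodecs.IMREAD_GRAYSCALE); // placeholder path
- Size patternSize = new Size(9, 6); // inner corners per row and column (placeholder)
- Mat corners = new Mat();
- boolean found = Calib3d.findChessboardCornersSB(gray, patternSize, corners);
- if (found) {
-     Scalar s = Calib3d.estimateChessboardSharpness(gray, patternSize, corners);
-     double avgSharpness = s.val[0]; // average width of the black-to-white transition, in pixels
-     if (avgSharpness > 3.0) {
-         System.out.println("Image likely too blurry for calibration: " + avgSharpness);
-     }
- }
- </code>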
- <a name="estimateTranslation3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateTranslation3D</h4>
- <pre>public static int estimateTranslation3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</pre>
- <div class="block">Computes an optimal translation between two 3D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- z\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- X\\
- Y\\
- Z\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- b_3\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set containing \((X,Y,Z)\).</dd>
- <dd><code>dst</code> - Second input 3D point set containing \((x,y,z)\).</dd>
- <dd><code>out</code> - Output 3D translation vector \(3 \times 1\) of the form
- \(
- \begin{bmatrix}
- b_1 \\
- b_2 \\
- b_3 \\
- \end{bmatrix}
- \)</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).
- In this overload the RANSAC reprojection threshold and the confidence level take their default
- values. A confidence between 0.95 and 0.99 is usually good enough; values too close to 1 can slow
- down the estimation significantly, and values lower than 0.8-0.9 can result in an incorrectly
- estimated transformation.
- The function estimates an optimal 3D translation between two 3D point sets using the
- RANSAC algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateTranslation3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateTranslation3D</h4>
- <pre>public static int estimateTranslation3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold)</pre>
- <div class="block">Computes an optimal translation between two 3D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- z\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- X\\
- Y\\
- Z\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- b_3\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set containing \((X,Y,Z)\).</dd>
- <dd><code>dst</code> - Second input 3D point set containing \((x,y,z)\).</dd>
- <dd><code>out</code> - Output 3D translation vector \(3 \times 1\) of the form
- \(
- \begin{bmatrix}
- b_1 \\
- b_2 \\
- b_3 \\
- \end{bmatrix}
- \)</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>ransacThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider a point as
- an inlier.
- In this overload the confidence level takes its default value; anything between 0.95 and 0.99 is
- usually good enough. Values too close to 1 can slow down the estimation significantly, and values
- lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- The function estimates an optimal 3D translation between two 3D point sets using the
- RANSAC algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="estimateTranslation3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>estimateTranslation3D</h4>
- <pre>public static int estimateTranslation3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> out,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- double ransacThreshold,
- double confidence)</pre>
- <div class="block">Computes an optimal translation between two 3D point sets.
- It computes
- \(
- \begin{bmatrix}
- x\\
- y\\
- z\\
- \end{bmatrix}
- =
- \begin{bmatrix}
- X\\
- Y\\
- Z\\
- \end{bmatrix}
- +
- \begin{bmatrix}
- b_1\\
- b_2\\
- b_3\\
- \end{bmatrix}
- \)</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - First input 3D point set containing \((X,Y,Z)\).</dd>
- <dd><code>dst</code> - Second input 3D point set containing \((x,y,z)\).</dd>
- <dd><code>out</code> - Output 3D translation vector \(3 \times 1\) of the form
- \(
- \begin{bmatrix}
- b_1 \\
- b_2 \\
- b_3 \\
- \end{bmatrix}
- \)</dd>
- <dd><code>inliers</code> - Output vector indicating which points are inliers (1-inlier, 0-outlier).</dd>
- <dd><code>ransacThreshold</code> - Maximum reprojection error in the RANSAC algorithm to consider a point as
- an inlier.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1, for the estimated transformation. Anything
- between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- The function estimates an optimal 3D translation between two 3D point sets using the
- RANSAC algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
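- A small illustrative Java sketch of the ransacThreshold/confidence overload; the four point
- correspondences below are synthetic (a pure translation by (2, 3, 4)) and only meant to show the
- calling convention:
- <code>
- MatOfPoint3f src = new MatOfPoint3f(
-         new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0), new Point3(0, 0, 1));
- MatOfPoint3f dst = new MatOfPoint3f(
-         new Point3(2, 3, 4), new Point3(3, 3, 4), new Point3(2, 4, 4), new Point3(2, 3, 5));
- Mat t = new Mat();       // receives the 3x1 translation vector [b_1, b_2, b_3]
- Mat inliers = new Mat(); // 1 for inliers, 0 for outliers
- Calib3d.estimateTranslation3D(src, dst, t, inliers, 3.0, 0.99);
- if (!t.empty()) {
-     System.out.println(t.dump()); // roughly [2; 3; 4] for these synthetic points
- }
- </code>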
- <a name="filterHomographyDecompByVisibleRefpoints-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>filterHomographyDecompByVisibleRefpoints</h4>
- <pre>public static void filterHomographyDecompByVisibleRefpoints(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rotations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> normals,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> beforePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> afterPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> possibleSolutions)</pre>
- <div class="block">Filters homography decompositions based on additional information.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rotations</code> - Vector of rotation matrices.</dd>
- <dd><code>normals</code> - Vector of plane normal matrices.</dd>
- <dd><code>beforePoints</code> - Vector of (rectified) visible reference points before the homography is applied</dd>
- <dd><code>afterPoints</code> - Vector of (rectified) visible reference points after the homography is applied</dd>
- <dd><code>possibleSolutions</code> - Vector of int indices representing the viable solution set after filtering
- This function is intended to filter the output of the #decomposeHomographyMat based on additional
- information as described in CITE: Malis2007 . The summary of the method: the #decomposeHomographyMat function
- returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
- sets of points visible in the camera frame before and after the homography transformation is applied,
- we can determine which are the true potential solutions and which are the opposites by verifying which
- homographies are consistent with all visible reference points being in front of the camera. The inputs
- are left unchanged; the filtered solution set is returned as indices into the existing one.</dd>
- </dl>
- </li>
- </ul>
- <a name="filterHomographyDecompByVisibleRefpoints-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>filterHomographyDecompByVisibleRefpoints</h4>
- <pre>public static void filterHomographyDecompByVisibleRefpoints(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rotations,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> normals,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> beforePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> afterPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> possibleSolutions,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pointsMask)</pre>
- <div class="block">Filters homography decompositions based on additional information.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>rotations</code> - Vector of rotation matrices.</dd>
- <dd><code>normals</code> - Vector of plane normal matrices.</dd>
- <dd><code>beforePoints</code> - Vector of (rectified) visible reference points before the homography is applied</dd>
- <dd><code>afterPoints</code> - Vector of (rectified) visible reference points after the homography is applied</dd>
- <dd><code>possibleSolutions</code> - Vector of int indices representing the viable solution set after filtering</dd>
- <dd><code>pointsMask</code> - optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function
- This function is intended to filter the output of the #decomposeHomographyMat based on additional
- information as described in CITE: Malis2007 . The summary of the method: the #decomposeHomographyMat function
- returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
- sets of points visible in the camera frame before and after the homography transformation is applied,
- we can determine which are the true potential solutions and which are the opposites by verifying which
- homographies are consistent with all visible reference points being in front of the camera. The inputs
- are left unchanged; the filtered solution set is returned as indices into the existing one.</dd>
- </dl>
- </li>
- </ul>
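- A hedged Java sketch of the intended workflow: decompose a homography, then keep only the
- solutions consistent with the visible reference points. The variables <code>H</code> (homography),
- <code>K</code> (camera intrinsic matrix), <code>beforePts</code> and <code>afterPts</code>
- (rectified reference points) are assumed to have been computed earlier, e.g. with #findHomography:
- <code>
- java.util.List<Mat> rotations = new java.util.ArrayList<>();
- java.util.List<Mat> translations = new java.util.ArrayList<>();
- java.util.List<Mat> normals = new java.util.ArrayList<>();
- Calib3d.decomposeHomographyMat(H, K, rotations, translations, normals);
- Mat possibleSolutions = new Mat(); // indices into rotations/normals that survive filtering
- Calib3d.filterHomographyDecompByVisibleRefpoints(rotations, normals, beforePts, afterPts,
-         possibleSolutions);
- for (int i = 0; i < possibleSolutions.rows(); i++) {
-     int idx = (int) possibleSolutions.get(i, 0)[0];
-     System.out.println("viable rotation:\n" + rotations.get(idx).dump());
- }
- </code>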
- <a name="filterSpeckles-org.opencv.core.Mat-double-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>filterSpeckles</h4>
- <pre>public static void filterSpeckles(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- double newVal,
- int maxSpeckleSize,
- double maxDiff)</pre>
- <div class="block">Filters off small noise blobs (speckles) in the disparity map</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>img</code> - The input 16-bit signed disparity image</dd>
- <dd><code>newVal</code> - The disparity value used to paint-off the speckles</dd>
- <dd><code>maxSpeckleSize</code> - The maximum speckle size to consider it a speckle. Larger blobs are not
- affected by the algorithm</dd>
- <dd><code>maxDiff</code> - Maximum difference between neighbor disparity pixels to put them into the same
- blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
- disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
- account when specifying this parameter value.</dd>
- </dl>
- </li>
- </ul>
- <a name="filterSpeckles-org.opencv.core.Mat-double-int-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>filterSpeckles</h4>
- <pre>public static void filterSpeckles(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- double newVal,
- int maxSpeckleSize,
- double maxDiff,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> buf)</pre>
- <div class="block">Filters off small noise blobs (speckles) in the disparity map</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>img</code> - The input 16-bit signed disparity image</dd>
- <dd><code>newVal</code> - The disparity value used to paint-off the speckles</dd>
- <dd><code>maxSpeckleSize</code> - The maximum speckle size to consider it a speckle. Larger blobs are not
- affected by the algorithm</dd>
- <dd><code>maxDiff</code> - Maximum difference between neighbor disparity pixels to put them into the same
- blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
- disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
- account when specifying this parameter value.</dd>
- <dd><code>buf</code> - The optional temporary buffer to avoid memory allocation within the function.</dd>
- </dl>
- </li>
- </ul>
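- An illustrative Java sketch of speckle filtering on a fixed-point disparity map; the input images
- <code>leftGray</code> and <code>rightGray</code> and the chosen parameter values are placeholders:
- <code>
- StereoBM stereo = StereoBM.create(64, 21); // numDisparities, blockSize (placeholders)
- Mat disparity = new Mat();
- stereo.compute(leftGray, rightGray, disparity); // CV_16S disparities multiplied by 16
- double newVal = 0;        // value used to paint over the speckles
- int maxSpeckleSize = 400; // blobs up to this many pixels are treated as speckles
- double maxDiff = 2 * 16;  // 2 disparity levels, scaled by the fixed-point factor 16
- Calib3d.filterSpeckles(disparity, newVal, maxSpeckleSize, maxDiff);
- </code>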
- <a name="find4QuadCornerSubpix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>find4QuadCornerSubpix</h4>
- <pre>public static boolean find4QuadCornerSubpix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> img,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> region_size)</pre>
- </li>
- </ul>
- <a name="findChessboardCorners-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.MatOfPoint2f-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findChessboardCorners</h4>
- <pre>public static boolean findChessboardCorners(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> corners)</pre>
- <div class="block">Finds the positions of internal corners of the chessboard.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Source chessboard view. It must be an 8-bit grayscale or color image.</dd>
- <dd><code>patternSize</code> - Number of inner corners per chessboard row and column
- ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).</dd>
- <dd><code>corners</code> - Output array of detected corners.
- <ul>
- <li>
- REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
- and white, rather than a fixed threshold level (computed from the average image brightness).
- </li>
- <li>
- REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before
- applying fixed or adaptive thresholding.
- </li>
- <li>
- REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
- square-like shape) to filter out false quads extracted at the contour retrieval stage.
- </li>
- <li>
- REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
- and shortcut the call if none is found. This can drastically speed up the call in the
- degenerate condition when no chessboard is observed.
- </li>
- </ul>
- The function attempts to determine whether the input image is a view of the chessboard pattern and
- locate the internal chessboard corners. The function returns a non-zero value if all of the corners
- are found and they are placed in a certain order (row by row, left to right in every row).
- Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
- a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
- squares touch each other. The detected coordinates are approximate, and to determine their positions
- more accurately, the function calls #cornerSubPix. You may also use the function #cornerSubPix with
- different parameters if the returned coordinates are not accurate enough.
- Sample usage of detecting and drawing chessboard corners: :
- <code>
- Size patternsize(8,6); //interior number of corners
- Mat gray = ....; //source image
- vector<Point2f> corners; //this will be filled by the detected corners
- //CALIB_CB_FAST_CHECK saves a lot of time on images
- //that do not contain any chessboard corners
- bool patternfound = findChessboardCorners(gray, patternsize, corners,
- CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
- + CALIB_CB_FAST_CHECK);
- if(patternfound)
- cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
- TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
- drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
- </code>
- <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
- the board to make the detection more robust in various environments. Otherwise, if there is no
- border and the background is dark, the outer black squares cannot be segmented properly and so the
- square grouping and ordering algorithm fails.
- Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findChessboardCorners-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.MatOfPoint2f-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findChessboardCorners</h4>
- <pre>public static boolean findChessboardCorners(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> corners,
- int flags)</pre>
- <div class="block">Finds the positions of internal corners of the chessboard.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Source chessboard view. It must be an 8-bit grayscale or color image.</dd>
- <dd><code>patternSize</code> - Number of inner corners per chessboard row and column
- ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).</dd>
- <dd><code>corners</code> - Output array of detected corners.</dd>
- <dd><code>flags</code> - Various operation flags that can be zero or a combination of the following values:
- <ul>
- <li>
- REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
- and white, rather than a fixed threshold level (computed from the average image brightness).
- </li>
- <li>
- REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before
- applying fixed or adaptive thresholding.
- </li>
- <li>
- REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
- square-like shape) to filter out false quads extracted at the contour retrieval stage.
- </li>
- <li>
- REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
- and shortcut the call if none is found. This can drastically speed up the call in the
- degenerate condition when no chessboard is observed.
- </li>
- </ul>
- The function attempts to determine whether the input image is a view of the chessboard pattern and
- locate the internal chessboard corners. The function returns a non-zero value if all of the corners
- are found and they are placed in a certain order (row by row, left to right in every row).
- Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
- a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
- squares touch each other. The detected coordinates are approximate, and to determine their positions
- more accurately, the function calls #cornerSubPix. You may also use the function #cornerSubPix with
- different parameters if the returned coordinates are not accurate enough.
- Sample usage of detecting and drawing chessboard corners: :
- <code>
- Size patternsize(8,6); //interior number of corners
- Mat gray = ....; //source image
- vector<Point2f> corners; //this will be filled by the detected corners
- //CALIB_CB_FAST_CHECK saves a lot of time on images
- //that do not contain any chessboard corners
- bool patternfound = findChessboardCorners(gray, patternsize, corners,
- CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
- + CALIB_CB_FAST_CHECK);
- if(patternfound)
- cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
- TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
- drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
- </code>
- <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
- the board to make the detection more robust in various environments. Otherwise, if there is no
- border and the background is dark, the outer black squares cannot be segmented properly and so the
- square grouping and ordering algorithm fails.
- Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
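- For reference, the C++ snippet above translates roughly as follows with the Java bindings; the
- source image <code>gray</code> and the drawing target <code>img</code> are assumed to be loaded
- elsewhere, so this is a sketch rather than a complete program:
- <code>
- Size patternsize = new Size(8, 6); // interior number of corners
- MatOfPoint2f corners = new MatOfPoint2f(); // filled by the detected corners
- // CALIB_CB_FAST_CHECK saves a lot of time on images without any chessboard corners
- boolean patternfound = Calib3d.findChessboardCorners(gray, patternsize, corners,
-         Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
-         + Calib3d.CALIB_CB_FAST_CHECK);
- if (patternfound) {
-     Imgproc.cornerSubPix(gray, corners, new Size(11, 11), new Size(-1, -1),
-             new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 0.1));
- }
- Calib3d.drawChessboardCorners(img, patternsize, corners, patternfound);
- </code>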
- <a name="findChessboardCornersSB-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findChessboardCornersSB</h4>
- <pre>public static boolean findChessboardCornersSB(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners)</pre>
- </li>
- </ul>
- <a name="findChessboardCornersSB-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findChessboardCornersSB</h4>
- <pre>public static boolean findChessboardCornersSB(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- int flags)</pre>
- </li>
- </ul>
- <a name="findChessboardCornersSBWithMeta-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-int-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findChessboardCornersSBWithMeta</h4>
- <pre>public static boolean findChessboardCornersSBWithMeta(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> corners,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> meta)</pre>
- <div class="block">Finds the positions of internal corners of the chessboard using a sector based approach.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>image</code> - Source chessboard view. It must be an 8-bit grayscale or color image.</dd>
- <dd><code>patternSize</code> - Number of inner corners per chessboard row and column
- ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).</dd>
- <dd><code>corners</code> - Output array of detected corners.</dd>
- <dd><code>flags</code> - Various operation flags that can be zero or a combination of the following values:
- <ul>
- <li>
- REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
- </li>
- <li>
- REF: CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
- </li>
- <li>
- REF: CALIB_CB_ACCURACY Upsample the input image to improve sub-pixel accuracy due to aliasing effects.
- </li>
- <li>
- REF: CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
- </li>
- <li>
- REF: CALIB_CB_MARKER The detected pattern must have a marker (see description).
- This should be used if an accurate camera calibration is required.
- </li>
- </ul></dd>
- <dd><code>meta</code> - Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
- Each entry stands for one corner of the pattern and can have one of the following values:
- <ul>
- <li>
- 0 = no meta data attached
- </li>
- <li>
- 1 = left-top corner of a black cell
- </li>
- <li>
- 2 = left-top corner of a white cell
- </li>
- <li>
- 3 = left-top corner of a black cell with a white marker dot
- </li>
- <li>
- 4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers otherwise first corner)
- </li>
- </ul>
- The function is analogous to #findChessboardCorners but uses a localized Radon
- transformation approximated by box filters, which makes it more robust to all sorts of
- noise, faster on larger images, and able to directly return the sub-pixel
- position of the internal chessboard corners. The method is based on the paper
- CITE: duda2018 "Accurate Detection and Localization of Checkerboard Corners for
- Calibration", demonstrating that the returned sub-pixel positions are more
- accurate than the ones returned by cornerSubPix, allowing a precise camera
- calibration for demanding applications.
- In case the flags REF: CALIB_CB_LARGER or REF: CALIB_CB_MARKER are given,
- the result can be recovered from the optional meta array. Both flags are
- helpful for using calibration patterns exceeding the field of view of the camera.
- These oversized patterns allow more accurate calibrations because corners as close as
- possible to the image borders can be utilized. For a
- consistent coordinate system across all images, the optional marker (see image
- below) can be used to move the origin of the board to the location where the
- black circle is located.
- <b>Note:</b> The function requires a white border with roughly the same width as one
- of the checkerboard fields around the whole board to improve the detection in
- various environments. In addition, because of the localized Radon
- transformation, it is beneficial to use round corners for the field corners
- located on the outside of the board. The following figure illustrates
- a sample checkerboard optimized for detection; however, any other checkerboard
- can be used as well.
- Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create the checkerboard.
- </dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
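- A short Java sketch of the sector-based detector with metadata output; the grayscale image
- <code>gray</code> and the pattern size are placeholders, and the flag combination is just one
- plausible choice for an accurate calibration:
- <code>
- Mat corners = new Mat();
- Mat meta = new Mat(); // per-corner metadata as described above
- int flags = Calib3d.CALIB_CB_NORMALIZE_IMAGE | Calib3d.CALIB_CB_EXHAUSTIVE
-         | Calib3d.CALIB_CB_ACCURACY;
- boolean found = Calib3d.findChessboardCornersSBWithMeta(gray, new Size(9, 6), corners, flags, meta);
- if (found) {
-     System.out.println("Detected " + corners.rows() + " corners");
- }
- </code>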
- <a name="findCirclesGrid-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findCirclesGrid</h4>
- <pre>public static boolean findCirclesGrid(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> centers)</pre>
- </li>
- </ul>
- <a name="findCirclesGrid-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findCirclesGrid</h4>
- <pre>public static boolean findCirclesGrid(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> image,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> patternSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> centers,
- int flags)</pre>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .
- are feature points from cameras with same focal length and principal point.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob,
- double threshold)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-double-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob,
- double threshold,
- int maxIters)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.
- for the other points. The array is computed only in the RANSAC and LMedS methods.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-int-double-double-int-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- int method,
- double prob,
- double threshold,
- int maxIters,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>focal</code> - focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>mask</code> - Output array of N elements, every element of which is set to 0 for outliers and to 1
- for the other points. The array is computed only in the RANSAC and LMedS methods.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- passing these coordinates, pass the identity matrix for this parameter.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- confidence (probability) that the estimated matrix is correct.
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also related. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
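- <div class="block">A minimal Java sketch of the flow described above: estimate the essential matrix with a
- known intrinsic matrix and then recover the relative pose with <code>recoverPose</code> (all numeric values
- below are placeholders):</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
- // Matched points from the two images (N >= 5); placeholder coordinates.
- MatOfPoint2f pts1 = new MatOfPoint2f(new Point(100, 120), new Point(200, 80),
-         new Point(310, 240), new Point(150, 300), new Point(400, 180));
- MatOfPoint2f pts2 = new MatOfPoint2f(new Point(104, 119), new Point(206, 83),
-         new Point(314, 237), new Point(152, 304), new Point(396, 183));
- // Camera intrinsic matrix A (placeholder fx, fy, cx, cy values).
- Mat K = new Mat(3, 3, CvType.CV_64F);
- K.put(0, 0, 800, 0, 320,  0, 800, 240,  0, 0, 1);
- // RANSAC is used by default; then recover R and t between the cameras.
- Mat E = Calib3d.findEssentialMat(pts1, pts2, K);
- Mat R = new Mat(), t = new Mat();
- Calib3d.recoverPose(E, pts1, pts2, K, R, t);
- </code>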
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- The prob, threshold, and mask parameters are not exposed by this overload and keep their default values.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.
- The threshold and mask parameters are not exposed by this overload and keep their default values.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob,
- double threshold)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- The mask output is not exposed by this overload.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob,
- double threshold,
- int maxIters)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-int-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- int method,
- double prob,
- double threshold,
- int maxIters,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>mask</code> - Output array of N elements, every element of which is set to 0 for outliers and to 1
- for the other points. The array is computed only in the RANSAC and LMedS methods.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
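- <div class="block">Continuing the earlier sketch (<code>pts1</code>, <code>pts2</code> and <code>K</code> as
- before), this is roughly how the fully parameterized overload above could be called; the RANSAC settings are
- placeholder choices:</div>
- <code>
- // Explicit RANSAC settings plus an inlier mask (placeholder values).
- Mat mask = new Mat();
- Mat E = Calib3d.findEssentialMat(pts1, pts2, K,
-         Calib3d.RANSAC, 0.999, 1.0, 2000, mask);
- // mask holds one entry per correspondence: 1 for inliers, 0 for outliers.
- int inlierCount = Core.countNonZero(mask);
- </code>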
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>cameraMatrix2</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>distCoeffs1</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>distCoeffs2</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- The robust-estimation parameters (method, prob, threshold, mask) are not exposed by this overload and
- keep their default values (the REF: RANSAC scheme is used).
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
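- <div class="block">A minimal Java sketch of the two-camera variant (the intrinsics and distortion
- coefficients below are placeholder values; <code>pts1</code> and <code>pts2</code> are matched points as in
- the earlier sketches):</div>
- <code>
- // Two cameras with different intrinsics and distortion (placeholder values).
- Mat K1 = new Mat(3, 3, CvType.CV_64F);
- K1.put(0, 0, 800, 0, 320,  0, 800, 240,  0, 0, 1);
- Mat K2 = new Mat(3, 3, CvType.CV_64F);
- K2.put(0, 0, 650, 0, 310,  0, 650, 250,  0, 0, 1);
- MatOfDouble dist1 = new MatOfDouble(0.10, -0.05, 0, 0, 0);
- MatOfDouble dist2 = new MatOfDouble(0.08, -0.02, 0, 0, 0);
- // Estimate E taking each camera's intrinsics and distortion into account.
- Mat E = Calib3d.findEssentialMat(pts1, pts2, K1, dist1, K2, dist2);
- </code>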
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>cameraMatrix2</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>distCoeffs1</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>distCoeffs2</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- The prob, threshold, and mask parameters are not exposed by this overload and keep their default values.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method,
- double prob)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>cameraMatrix2</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>distCoeffs1</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>distCoeffs2</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.
- The threshold and mask parameters are not exposed by this overload and keep their default values.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method,
- double prob,
- double threshold)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>cameraMatrix2</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>distCoeffs1</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>distCoeffs2</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- The mask output is not exposed by this overload.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- int method,
- double prob,
- double threshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <div class="block">Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N (N >= 5) 2D points from the first image. The point coordinates should
- be floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>cameraMatrix2</code> - Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera matrix. If this assumption does not hold for your use case, use
- #undistortPoints with <code>P = cv::NoArray()</code> for both cameras to transform image points
- to normalized image coordinates, which are valid for the identity camera matrix. When
- passing these coordinates, pass the identity matrix for this parameter.</dd>
- <dd><code>distCoeffs1</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>distCoeffs2</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>mask</code> - Output array of N elements, every element of which is set to 0 for outliers and to 1
- for the other points. The array is computed only in the RANSAC and LMedS methods.
- This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
- CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
- \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
- where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively. The result of this function may be passed further to
- #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findEssentialMat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findEssentialMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findEssentialMat(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dist_coeff1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dist_coeff2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</pre>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2)</pre>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method)</pre>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold)</pre>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence)</pre>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence,
- int maxIters)</pre>
- <div class="block">Calculates a fundamental matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
- </li>
- <li>
- REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
- </li>
- <li>
- REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
- </li>
- <li>
- REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>confidence</code> - Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
- of confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- The epipolar geometry is described by the following equation:
- \([p_2; 1]^T F [p_1; 1] = 0\)
- where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively.
- The function calculates the fundamental matrix using one of the four methods listed above and returns
- the found fundamental matrix. Normally just one matrix is found, but in the case of the 7-point
- algorithm the function may return up to 3 solutions (a \(9 \times 3\) matrix that stores all 3
- matrices sequentially).
- The calculated fundamental matrix may be passed further to #computeCorrespondEpilines, which finds the
- epipolar lines corresponding to the specified points. It can also be passed to
- #stereoRectifyUncalibrated to compute the rectification transformation:
- <code>
- // Example. Estimation of fundamental matrix using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- Mat fundamental_matrix =
- findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
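- <div class="block">The C++ example above translates roughly as follows to the Java API (a sketch; the
- matched coordinates are assumed to come from your own feature matcher):</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
- // points1 and points2 hold the matched coordinates produced by a feature matcher.
- MatOfPoint2f points1 = new MatOfPoint2f(/* ... matched Point objects ... */);
- MatOfPoint2f points2 = new MatOfPoint2f(/* ... matched Point objects ... */);
- // Estimate the fundamental matrix with RANSAC, a 3-pixel threshold and 0.99 confidence.
- Mat fundamentalMatrix = Calib3d.findFundamentalMat(
-         points1, points2, Calib3d.FM_RANSAC, 3, 0.99);
- </code>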
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-int-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence,
- int maxIters,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <div class="block">Calculates a fundamental matrix from the corresponding points in two images.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>method</code> - Method for computing a fundamental matrix.
- <ul>
- <li>
- REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
- </li>
- <li>
- REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
- </li>
- <li>
- REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
- </li>
- <li>
- REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>confidence</code> - Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
- of confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>mask</code> - optional output mask</dd>
- <dd><code>maxIters</code> - The maximum number of robust method iterations.
- The epipolar geometry is described by the following equation:
- \([p_2; 1]^T F [p_1; 1] = 0\)
- where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
- second images, respectively.
- The function calculates the fundamental matrix using one of the four methods listed above and returns
- the found fundamental matrix. Normally just one matrix is found, but in the case of the 7-point
- algorithm the function may return up to 3 solutions (a \(9 \times 3\) matrix that stores all 3
- matrices sequentially).
- The calculated fundamental matrix may be passed further to #computeCorrespondEpilines, which finds the
- epipolar lines corresponding to the specified points. It can also be passed to
- #stereoRectifyUncalibrated to compute the rectification transformation:
- <code>
- // Example. Estimation of fundamental matrix using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- Mat fundamental_matrix =
- findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- int method,
- double ransacReprojThreshold,
- double confidence,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- </li>
- </ul>
- <a name="findFundamentalMat-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findFundamentalMat</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findFundamentalMat(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points1,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</pre>
- </li>
- </ul>
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints)</pre>
- <div class="block">Finds a perspective transformation between two planes.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>srcPoints</code> - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- or vector<Point2f> .</dd>
- <dd><code>dstPoints</code> - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- a vector<Point2f> .
- The method, ransacReprojThreshold, and mask parameters are not exposed by this overload; the homography
- is estimated with the default regular (least-squares) method using all of the point pairs.
- The function finds and returns the perspective transformation \(H\) between the source and the
- destination planes:
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
- so that the back-projection error
- \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
- is minimized. If the parameter method is set to the default value 0, the function uses all the point
- pairs to compute an initial homography estimate with a simple least-squares scheme.
- However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
- transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- the mask of inliers/outliers.
- Regardless of the method, robust or not, the computed homography matrix is refined further (using
- inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- re-projection error even more.
- The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- noise is rather small, use the default method (method=0).
- The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
- determined only up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
- cannot be estimated, an empty one will be returned.
- SEE:
- getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- perspectiveTransform</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
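- <div class="block">A minimal Java sketch of this overload (placeholder plane-to-plane correspondences); the
- estimated homography is then applied to further points with <code>Core.perspectiveTransform</code>:</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
- // Four (or more) correspondences between the two planes; placeholder coordinates.
- MatOfPoint2f src = new MatOfPoint2f(new Point(0, 0), new Point(640, 0),
-         new Point(640, 480), new Point(0, 480));
- MatOfPoint2f dst = new MatOfPoint2f(new Point(12, 8), new Point(620, 15),
-         new Point(610, 470), new Point(18, 465));
- // Default method (0): plain least squares over all point pairs.
- Mat H = Calib3d.findHomography(src, dst);
- // Map the source points through the estimated homography.
- MatOfPoint2f mapped = new MatOfPoint2f();
- Core.perspectiveTransform(src, mapped, H);
- </code>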
- </li>
- </ul>
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method)</pre>
- <div class="block">Finds a perspective transformation between two planes.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>srcPoints</code> - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- or vector<Point2f> .</dd>
- <dd><code>dstPoints</code> - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- a vector<Point2f> .</dd>
- <dd><code>method</code> - Method used to compute a homography matrix. The following methods are possible:
- <ul>
- <li>
- <b>0</b> - a regular method using all the points, i.e., the least squares method
- </li>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- </li>
- <li>
- REF: RHO - PROSAC-based robust method
- </li>
- </ul>
- The ransacReprojThreshold and mask parameters are not exposed by this overload and keep their default
- values.
- The function finds and returns the perspective transformation \(H\) between the source and the
- destination planes:
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
- so that the back-projection error
- \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
- is minimized. If the parameter method is set to the default value 0, the function uses all the point
- pairs to compute an initial homography estimate with a simple least-squares scheme.
- However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
- transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- the mask of inliers/outliers.
- Regardless of the method, robust or not, the computed homography matrix is refined further (using
- inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- re-projection error even more.
- The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- noise is rather small, use the default method (method=0).
- The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
- determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
- cannot be estimated, an empty one will be returned.
- SEE:
- getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- perspectiveTransform</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
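- <div class="block">A minimal usage sketch of this overload (the matched point coordinates below are
- hypothetical, and the OpenCV native library is assumed to be on the library path):</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.Point;
- 
- public class FindHomographySimple {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
- 
-         // Hypothetical matched point pairs; at least four non-collinear pairs are required.
-         MatOfPoint2f src = new MatOfPoint2f(
-                 new Point(56, 65), new Point(368, 52), new Point(28, 387),
-                 new Point(389, 390), new Point(150, 200));
-         MatOfPoint2f dst = new MatOfPoint2f(
-                 new Point(0, 0), new Point(300, 0), new Point(0, 300),
-                 new Point(300, 300), new Point(120, 160));
- 
-         // Robust estimation with RANSAC and the default reprojection threshold.
-         Mat H = Calib3d.findHomography(src, dst, Calib3d.RANSAC);
-         System.out.println(H.empty() ? "estimation failed" : H.dump());
-     }
- }
- </pre>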
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold)</pre>
- <div class="block">Finds a perspective transformation between two planes.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>srcPoints</code> - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- or vector<Point2f> .</dd>
- <dd><code>dstPoints</code> - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- a vector<Point2f> .</dd>
- <dd><code>method</code> - Method used to compute a homography matrix. The following methods are possible:
- <ul>
- <li>
- <b>0</b> - a regular method using all the points, i.e., the least squares method
- </li>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- </li>
- <li>
- REF: RHO - PROSAC-based robust method
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum allowed reprojection error to treat a point pair as an inlier
- (used in the RANSAC and RHO methods only). That is, if
- \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
- then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
- it usually makes sense to set this parameter somewhere in the range of 1 to 10.
- The function finds and returns the perspective transformation \(H\) between the source and the
- destination planes:
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
- so that the back-projection error
- \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
- is minimized. If the parameter method is set to the default value 0, the function uses all the point
- pairs to compute an initial homography estimate with a simple least-squares scheme.
- However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
- transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- the mask of inliers/outliers.
- Regardless of the method, robust or not, the computed homography matrix is refined further (using
- inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- re-projection error even more.
- The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- noise is rather small, use the default method (method=0).
- The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
- determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
- cannot be estimated, an empty one will be returned.
- SEE:
- getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- perspectiveTransform</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <div class="block">Finds a perspective transformation between two planes.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>srcPoints</code> - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- or vector<Point2f> .</dd>
- <dd><code>dstPoints</code> - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- a vector<Point2f> .</dd>
- <dd><code>method</code> - Method used to compute a homography matrix. The following methods are possible:
- <ul>
- <li>
- <b>0</b> - a regular method using all the points, i.e., the least squares method
- </li>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- </li>
- <li>
- REF: RHO - PROSAC-based robust method
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum allowed reprojection error to treat a point pair as an inlier
- (used in the RANSAC and RHO methods only). That is, if
- \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
- then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
- it usually makes sense to set this parameter somewhere in the range of 1 to 10.</dd>
- <dd><code>mask</code> - Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
- mask values are ignored.
- The function finds and returns the perspective transformation \(H\) between the source and the
- destination planes:
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
- so that the back-projection error
- \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
- is minimized. If the parameter method is set to the default value 0, the function uses all the point
- pairs to compute an initial homography estimate with a simple least-squares scheme.
- However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
- transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- the mask of inliers/outliers.
- Regardless of the method, robust or not, the computed homography matrix is refined further (using
- inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- re-projection error even more.
- The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- noise is rather small, use the default method (method=0).
- The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
- determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
- cannot be estimated, an empty one will be returned.
- SEE:
- getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- perspectiveTransform</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- int maxIters)</pre>
- <div class="block">Finds a perspective transformation between two planes.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>srcPoints</code> - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- or vector<Point2f> .</dd>
- <dd><code>dstPoints</code> - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- a vector<Point2f> .</dd>
- <dd><code>method</code> - Method used to compute a homography matrix. The following methods are possible:
- <ul>
- <li>
- <b>0</b> - a regular method using all the points, i.e., the least squares method
- </li>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- </li>
- <li>
- REF: RHO - PROSAC-based robust method
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum allowed reprojection error to treat a point pair as an inlier
- (used in the RANSAC and RHO methods only). That is, if
- \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
- then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
- it usually makes sense to set this parameter somewhere in the range of 1 to 10.</dd>
- <dd><code>mask</code> - Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
- mask values are ignored.</dd>
- <dd><code>maxIters</code> - The maximum number of RANSAC iterations.
- The function finds and returns the perspective transformation \(H\) between the source and the
- destination planes:
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
- so that the back-projection error
- \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
- is minimized. If the parameter method is set to the default value 0, the function uses all the point
- pairs to compute an initial homography estimate with a simple least-squares scheme.
- However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
- transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- the mask of inliers/outliers.
- Regardless of the method, robust or not, the computed homography matrix is refined further (using
- inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- re-projection error even more.
- The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- noise is rather small, use the default method (method=0).
- The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
- determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
- cannot be estimated, an empty one will be returned.
- SEE:
- getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- perspectiveTransform</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-int-double-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- int method,
- double ransacReprojThreshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- int maxIters,
- double confidence)</pre>
- <div class="block">Finds a perspective transformation between two planes.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>srcPoints</code> - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
- or vector<Point2f> .</dd>
- <dd><code>dstPoints</code> - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
- a vector<Point2f> .</dd>
- <dd><code>method</code> - Method used to compute a homography matrix. The following methods are possible:
- <ul>
- <li>
- <b>0</b> - a regular method using all the points, i.e., the least squares method
- </li>
- <li>
- REF: RANSAC - RANSAC-based robust method
- </li>
- <li>
- REF: LMEDS - Least-Median robust method
- </li>
- <li>
- REF: RHO - PROSAC-based robust method
- </li>
- </ul></dd>
- <dd><code>ransacReprojThreshold</code> - Maximum allowed reprojection error to treat a point pair as an inlier
- (used in the RANSAC and RHO methods only). That is, if
- \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
- then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
- it usually makes sense to set this parameter somewhere in the range of 1 to 10.</dd>
- <dd><code>mask</code> - Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
- mask values are ignored.</dd>
- <dd><code>maxIters</code> - The maximum number of RANSAC iterations.</dd>
- <dd><code>confidence</code> - Confidence level, between 0 and 1.
- The function finds and returns the perspective transformation \(H\) between the source and the
- destination planes:
- \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
- so that the back-projection error
- \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
- is minimized. If the parameter method is set to the default value 0, the function uses all the point
- pairs to compute an initial homography estimate with a simple least-squares scheme.
- However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
- transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
- you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
- random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
- using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
- computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
- LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
- the mask of inliers/outliers.
- Regardless of the method, robust or not, the computed homography matrix is refined further (using
- inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
- re-projection error even more.
- The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
- distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
- correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
- noise is rather small, use the default method (method=0).
- The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
- determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
- cannot be estimated, an empty one will be returned.
- SEE:
- getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
- perspectiveTransform</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
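- <div class="block">A sketch of the full-parameter overload, showing how the optional inlier mask can be
- inspected afterwards (the correspondences are hypothetical and the last pair is a deliberate outlier):</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.Point;
- 
- public class FindHomographyRansacMask {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
- 
-         MatOfPoint2f src = new MatOfPoint2f(
-                 new Point(10, 10), new Point(200, 12), new Point(15, 180),
-                 new Point(205, 185), new Point(100, 90), new Point(400, 400));
-         MatOfPoint2f dst = new MatOfPoint2f(
-                 new Point(12, 14), new Point(198, 18), new Point(20, 182),
-                 new Point(207, 190), new Point(102, 95), new Point(50, 60));
- 
-         Mat mask = new Mat();  // receives one row per point pair: 1 = inlier, 0 = outlier
-         Mat H = Calib3d.findHomography(src, dst, Calib3d.RANSAC,
-                 3.0,    // ransacReprojThreshold, in pixels
-                 mask,
-                 2000,   // maxIters
-                 0.995); // confidence
- 
-         for (int i = 0; i < mask.rows(); i++) {
-             boolean inlier = mask.get(i, 0)[0] != 0;
-             System.out.println("pair " + i + (inlier ? ": inlier" : ": outlier"));
-         }
-         System.out.println(H.empty() ? "estimation failed" : H.dump());
-     }
- }
- </pre>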
- <a name="findHomography-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>findHomography</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> findHomography(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> srcPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dstPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</pre>
- </li>
- </ul>
- <a name="fisheye_calibrate-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_calibrate</h4>
- <pre>public static double fisheye_calibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</pre>
- <div class="block">Performs camera calibration</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - vector of vectors of calibration pattern points in the calibration pattern
- coordinate space.</dd>
- <dd><code>imagePoints</code> - vector of vectors of the projections of calibration pattern points.
- imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal to
- objectPoints[i].size() for each i.</dd>
- <dd><code>image_size</code> - Size of the image used only to initialize the camera intrinsic matrix.</dd>
- <dd><code>K</code> - Output 3x3 floating-point camera intrinsic matrix
- \(\cameramatrix{A}\) . If
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- initialized before calling the function.</dd>
- <dd><code>D</code> - Output vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- the next output parameter description) brings the calibration pattern from the model coordinate
- space (in which object points are specified) to the world coordinate space, that is, a real
- position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view.
- <ul>
- <li>
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- </li>
- <li>
- REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
- of intrinsic optimization.
- </li>
- <li>
- REF: fisheye::CALIB_CHECK_COND The functions will check validity of condition number.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stays zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
- are set to zero and stay zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
- optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_calibrate-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_calibrate</h4>
- <pre>public static double fisheye_calibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</pre>
- <div class="block">Performs camera calibration</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - vector of vectors of calibration pattern points in the calibration pattern
- coordinate space.</dd>
- <dd><code>imagePoints</code> - vector of vectors of the projections of calibration pattern points.
- imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal to
- objectPoints[i].size() for each i.</dd>
- <dd><code>image_size</code> - Size of the image used only to initialize the camera intrinsic matrix.</dd>
- <dd><code>K</code> - Output 3x3 floating-point camera intrinsic matrix
- \(\cameramatrix{A}\) . If
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- initialized before calling the function.</dd>
- <dd><code>D</code> - Output vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- the next output parameter description) brings the calibration pattern from the model coordinate
- space (in which object points are specified) to the world coordinate space, that is, a real
- position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- </li>
- <li>
- REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
- of intrinsic optimization.
- </li>
- <li>
- REF: fisheye::CALIB_CHECK_COND The functions will check validity of condition number.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stays zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
- are set to zero and stay zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
- optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_calibrate-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_calibrate</h4>
- <pre>public static double fisheye_calibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Performs camera calibration</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - vector of vectors of calibration pattern points in the calibration pattern
- coordinate space.</dd>
- <dd><code>imagePoints</code> - vector of vectors of the projections of calibration pattern points.
- imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal to
- objectPoints[i].size() for each i.</dd>
- <dd><code>image_size</code> - Size of the image used only to initialize the camera intrinsic matrix.</dd>
- <dd><code>K</code> - Output 3x3 floating-point camera intrinsic matrix
- \(\cameramatrix{A}\) . If
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- initialized before calling the function.</dd>
- <dd><code>D</code> - Output vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- the next output parameter description) brings the calibration pattern from the model coordinate
- space (in which object points are specified) to the world coordinate space, that is, a real
- position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- </li>
- <li>
- REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
- of intrinsic optimization.
- </li>
- <li>
- REF: fisheye::CALIB_CHECK_COND The functions will check validity of condition number.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stays zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
- are set to zero and stay zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
- optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
- optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- </li>
- </ul></dd>
- <dd><code>criteria</code> - Termination criteria for the iterative optimization algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
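- <div class="block">A minimal sketch of a call to this overload. The point lists are placeholders that would
- normally be filled from detected calibration patterns over several views, and the flag constant names below
- are assumed to follow the fisheye_ prefix convention that the Java bindings use for the
- REF: fisheye::CALIB_* values listed above:</div>
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.Mat;
- import org.opencv.core.Size;
- import org.opencv.core.TermCriteria;
- 
- public class FisheyeCalibrateSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
- 
-         // One Mat of pattern points per view; fill these with real detections before calibrating.
-         List<Mat> objectPoints = new ArrayList<>();
-         List<Mat> imagePoints  = new ArrayList<>();
- 
-         Mat K = new Mat();                    // output 3x3 intrinsic matrix
-         Mat D = new Mat();                    // output 4-element fisheye distortion vector
-         List<Mat> rvecs = new ArrayList<>();  // per-view rotation vectors
-         List<Mat> tvecs = new ArrayList<>();  // per-view translation vectors
- 
-         // Assumed Java names for fisheye::CALIB_RECOMPUTE_EXTRINSIC and fisheye::CALIB_FIX_SKEW.
-         int flags = Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC | Calib3d.fisheye_CALIB_FIX_SKEW;
-         TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6);
- 
-         double rms = Calib3d.fisheye_calibrate(objectPoints, imagePoints,
-                 new Size(1280, 720), K, D, rvecs, tvecs, flags, criteria);
-         System.out.println("RMS re-projection error: " + rms);
-     }
- }
- </pre>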
- <a name="fisheye_distortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_distortPoints</h4>
- <pre>public static void fisheye_distortPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</pre>
- <div class="block">Distorts 2D points using fisheye model.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>undistorted</code> - Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is
- the number of points in the view.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>distorted</code> - Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
- Note that the function assumes the camera intrinsic matrix of the undistorted points to be the identity.
- This means that if you want to distort image points, you have to multiply them by \(K^{-1}\) first.</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_distortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_distortPoints</h4>
- <pre>public static void fisheye_distortPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- double alpha)</pre>
- <div class="block">Distorts 2D points using fisheye model.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>undistorted</code> - Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is
- the number of points in the view.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>alpha</code> - The skew coefficient.</dd>
- <dd><code>distorted</code> - Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
- Note that the function assumes the camera intrinsic matrix of the undistorted points to be the identity.
- This means that if you want to distort image points, you have to multiply them by \(K^{-1}\) first.</dd>
- </dl>
- </li>
- </ul>
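- <div class="block">A short sketch of the convention described above: the input points are given in normalized
- coordinates (as if the intrinsic matrix were the identity), and the output is in distorted pixel coordinates.
- The intrinsics and distortion coefficients below are illustrative values, not from a real calibration:</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.Point;
- 
- public class FisheyeDistortPointsSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
- 
-         Mat K = Mat.eye(3, 3, CvType.CV_64F);
-         K.put(0, 0, 500.0); K.put(1, 1, 500.0);   // fx, fy
-         K.put(0, 2, 320.0); K.put(1, 2, 240.0);   // cx, cy
-         Mat D = new Mat(1, 4, CvType.CV_64F);
-         D.put(0, 0, -0.01, 0.005, 0.0, 0.0);      // k1..k4
- 
-         // Normalized (identity-intrinsic) input points, i.e. pixel points already multiplied by K^{-1}.
-         MatOfPoint2f undistorted = new MatOfPoint2f(
-                 new Point(0.0, 0.0), new Point(0.1, -0.05), new Point(-0.2, 0.15));
- 
-         Mat distorted = new Mat();
-         Calib3d.fisheye_distortPoints(undistorted, distorted, K, D);
-         System.out.println(distorted.dump());
-     }
- }
- </pre>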
- <a name="fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_estimateNewCameraMatrixForUndistortRectify</h4>
- <pre>public static void fisheye_estimateNewCameraMatrixForUndistortRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P)</pre>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>image_size</code> - Size of the image</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4).</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_estimateNewCameraMatrixForUndistortRectify</h4>
- <pre>public static void fisheye_estimateNewCameraMatrixForUndistortRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- double balance)</pre>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>image_size</code> - Size of the image</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4)</dd>
- <dd><code>balance</code> - Sets the new focal length between the minimum and the maximum focal
- length. Balance must be in the range [0, 1].</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_estimateNewCameraMatrixForUndistortRectify</h4>
- <pre>public static void fisheye_estimateNewCameraMatrixForUndistortRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- double balance,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> new_size)</pre>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>image_size</code> - Size of the image</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4)</dd>
- <dd><code>balance</code> - Sets the new focal length between the minimum and the maximum focal
- length. Balance must be in the range [0, 1].</dd>
- <dd><code>new_size</code> - the new size</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_estimateNewCameraMatrixForUndistortRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Size-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_estimateNewCameraMatrixForUndistortRectify</h4>
- <pre>public static void fisheye_estimateNewCameraMatrixForUndistortRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> image_size,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- double balance,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> new_size,
- double fov_scale)</pre>
- <div class="block">Estimates new camera intrinsic matrix for undistortion or rectification.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>image_size</code> - Size of the image</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4)</dd>
- <dd><code>balance</code> - Sets the new focal length between the minimum and the maximum focal
- length. Balance must be in the range [0, 1].</dd>
- <dd><code>new_size</code> - the new size</dd>
- <dd><code>fov_scale</code> - Divisor for new focal length.</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_initUndistortRectifyMap-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_initUndistortRectifyMap</h4>
- <pre>public static void fisheye_initUndistortRectifyMap(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size,
- int m1type,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map2)</pre>
- <div class="block">Computes undistortion and rectification maps for image transform by #remap. If D is empty zero
- distortion is used, if R or P is empty identity matrixes are used.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K</code> - Camera intrinsic matrix \(\cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4)</dd>
- <dd><code>size</code> - Undistorted image size.</dd>
- <dd><code>m1type</code> - Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
- for details.</dd>
- <dd><code>map1</code> - The first output map.</dd>
- <dd><code>map2</code> - The second output map.</dd>
- </dl>
- </li>
- </ul>
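- <div class="block">A sketch combining fisheye_estimateNewCameraMatrixForUndistortRectify and
- fisheye_initUndistortRectifyMap with Imgproc.remap to undistort a fisheye image. The intrinsics,
- distortion coefficients and the input file name are illustrative placeholders:</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.Size;
- import org.opencv.imgcodecs.Imgcodecs;
- import org.opencv.imgproc.Imgproc;
- 
- public class FisheyeUndistortSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
- 
-         // Placeholder intrinsics/distortion; in practice these come from fisheye_calibrate.
-         Mat K = Mat.eye(3, 3, CvType.CV_64F);
-         K.put(0, 0, 420.0); K.put(1, 1, 420.0);
-         K.put(0, 2, 640.0); K.put(1, 2, 360.0);
-         Mat D = Mat.zeros(1, 4, CvType.CV_64F);
- 
-         Size imageSize = new Size(1280, 720);
-         Mat R = Mat.eye(3, 3, CvType.CV_64F);   // identity: no rectification rotation
- 
-         // balance = 0 keeps only the central region, balance = 1 keeps the full field of view.
-         Mat newK = new Mat();
-         Calib3d.fisheye_estimateNewCameraMatrixForUndistortRectify(
-                 K, D, imageSize, R, newK, 0.5, imageSize, 1.0);
- 
-         // Build the remap tables once, then undistort any number of frames with remap.
-         Mat map1 = new Mat(), map2 = new Mat();
-         Calib3d.fisheye_initUndistortRectifyMap(
-                 K, D, R, newK, imageSize, CvType.CV_16SC2, map1, map2);
- 
-         Mat frame = Imgcodecs.imread("fisheye_frame.png");   // hypothetical input image
-         if (!frame.empty()) {
-             Mat undistorted = new Mat();
-             Imgproc.remap(frame, undistorted, map1, map2, Imgproc.INTER_LINEAR);
-             Imgcodecs.imwrite("undistorted.png", undistorted);
-         }
-     }
- }
- </pre>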
- <a name="fisheye_projectPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_projectPoints</h4>
- <pre>public static void fisheye_projectPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</pre>
- </li>
- </ul>
- <a name="fisheye_projectPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_projectPoints</h4>
- <pre>public static void fisheye_projectPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- double alpha)</pre>
- </li>
- </ul>
- <a name="fisheye_projectPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_projectPoints</h4>
- <pre>public static void fisheye_projectPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- double alpha,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian)</pre>
- </li>
- </ul>
- <a name="fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoCalibrate</h4>
- <pre>public static double fisheye_stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T)</pre>
- </li>
- </ul>
- <a name="fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoCalibrate</h4>
- <pre>public static double fisheye_stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- int flags)</pre>
- </li>
- </ul>
- <a name="fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoCalibrate</h4>
- <pre>public static double fisheye_stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- </li>
- </ul>
- <a name="fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoCalibrate</h4>
- <pre>public static double fisheye_stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</pre>
- <div class="block">Performs stereo calibration</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points.</dd>
- <dd><code>imagePoints1</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera.</dd>
- <dd><code>imagePoints2</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera.</dd>
- <dd><code>K1</code> - Input/output first camera intrinsic matrix:
- \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
- any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
- some or all of the matrix components must be initialized.</dd>
- <dd><code>D1</code> - Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.</dd>
- <dd><code>K2</code> - Input/output second camera intrinsic matrix. The parameter is similar to K1 .</dd>
- <dd><code>D2</code> - Input/output lens distortion coefficients for the second camera. The parameter is
- similar to D1 .</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix between the 1st and the 2nd camera coordinate systems.</dd>
- <dd><code>T</code> - Output translation vector between the coordinate systems of the cameras.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).
- The following calibration flags (accepted by the overloads that take a <code>flags</code> argument) are available:
- <ul>
- <li>
- REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only R, T matrices
- are estimated.
- </li>
- <li>
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center (imageSize is used), and focal distances are computed in a least-squares fashion.
- </li>
- <li>
- REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
- of intrinsic optimization.
- </li>
- <li>
- REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
- zero.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoCalibrate</h4>
- <pre>public static double fisheye_stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</pre>
- <div class="block">Performs stereo calibration</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points.</dd>
- <dd><code>imagePoints1</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera.</dd>
- <dd><code>imagePoints2</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera.</dd>
- <dd><code>K1</code> - Input/output first camera intrinsic matrix:
- \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
- any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
- some or all of the matrix components must be initialized.</dd>
- <dd><code>D1</code> - Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.</dd>
- <dd><code>K2</code> - Input/output second camera intrinsic matrix. The parameter is similar to K1 .</dd>
- <dd><code>D2</code> - Input/output lens distortion coefficients for the second camera. The parameter is
- similar to D1 .</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix between the 1st and the 2nd camera coordinate systems.</dd>
- <dd><code>T</code> - Output translation vector between the coordinate systems of the cameras.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). In more detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only R, T matrices
- are estimated.
- </li>
- <li>
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center (imageSize is used), and focal distances are computed in a least-squares fashion.
- </li>
- <li>
- REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
- of intrinsic optimization.
- </li>
- <li>
- REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
- zero.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoCalibrate</h4>
- <pre>public static double fisheye_stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Performs stereo calibration</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points.</dd>
- <dd><code>imagePoints1</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera.</dd>
- <dd><code>imagePoints2</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera.</dd>
- <dd><code>K1</code> - Input/output first camera intrinsic matrix:
- \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
- any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
- some or all of the matrix components must be initialized.</dd>
- <dd><code>D1</code> - Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.</dd>
- <dd><code>K2</code> - Input/output second camera intrinsic matrix. The parameter is similar to K1 .</dd>
- <dd><code>D2</code> - Input/output lens distortion coefficients for the second camera. The parameter is
- similar to D1 .</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix between the 1st and the 2nd camera coordinate systems.</dd>
- <dd><code>T</code> - Output translation vector between the coordinate systems of the cameras.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). In more detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only R, T matrices
- are estimated.
- </li>
- <li>
- REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center (imageSize is used), and focal distances are computed in a least-squares fashion.
- </li>
- <li>
- REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
- of intrinsic optimization.
- </li>
- <li>
- REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
- </li>
- <li>
- REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
- zero.
- </li>
- </ul></dd>
- <dd><code>criteria</code> - Termination criteria for the iterative optimization algorithm.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
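- <div class="block">The sketch below illustrates a typical call to the full overload above from Java. It is only an
- illustrative sketch: it assumes the enclosing class is <code>org.opencv.calib3d.Calib3d</code> and that the
- <code>fisheye_CALIB_*</code> constants exposed by the Java bindings correspond to the REF: fisheye::CALIB_* flags listed
- in the parameter description. The point lists are assumed to be filled beforehand (one Mat per view).</div>
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.Size;
- import org.opencv.core.TermCriteria;
-
- public class FisheyeStereoCalibrateExample {
-     // objectPoints / imagePoints1 / imagePoints2 must already hold one Mat per view.
-     static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints1,
-                             List<Mat> imagePoints2, Size imageSize) {
-         Mat K1 = Mat.eye(3, 3, CvType.CV_64F);  // intrinsics are estimated unless
-         Mat D1 = new Mat();                     // CALIB_USE_INTRINSIC_GUESS is set
-         Mat K2 = Mat.eye(3, 3, CvType.CV_64F);
-         Mat D2 = new Mat();
-         Mat R = new Mat();                      // output rotation between the cameras
-         Mat T = new Mat();                      // output translation between the cameras
-         List<Mat> rvecs = new ArrayList<>();    // per-view extrinsics (first camera)
-         List<Mat> tvecs = new ArrayList<>();
-
-         // Assumed Java-binding names of the REF: fisheye::CALIB_* flags listed above.
-         int flags = Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC
-                   | Calib3d.fisheye_CALIB_CHECK_COND
-                   | Calib3d.fisheye_CALIB_FIX_SKEW;
-         TermCriteria criteria =
-                 new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-10);
-
-         // The return value is the RMS re-projection error of the calibration.
-         return Calib3d.fisheye_stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
-                 K1, D1, K2, D2, imageSize, R, T, rvecs, tvecs, flags, criteria);
-     }
- }
- </pre>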
- <a name="fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoRectify</h4>
- <pre>public static void fisheye_stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags)</pre>
- <div class="block">Stereo rectification for fisheye camera model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K1</code> - First camera intrinsic matrix.</dd>
- <dd><code>D1</code> - First camera distortion parameters.</dd>
- <dd><code>K2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>D2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix between the coordinate systems of the first and the second
- cameras.</dd>
- <dd><code>tvec</code> - Translation vector between coordinate systems of the cameras.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. If the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoRectify</h4>
- <pre>public static void fisheye_stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize)</pre>
- <div class="block">Stereo rectification for fisheye camera model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K1</code> - First camera intrinsic matrix.</dd>
- <dd><code>D1</code> - First camera distortion parameters.</dd>
- <dd><code>K2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>D2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix between the coordinate systems of the first and the second
- cameras.</dd>
- <dd><code>tvec</code> - Translation vector between coordinate systems of the cameras.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. If the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>newImageSize</code> - New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
- preserve details in the original image, especially when there is a large radial distortion.</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.Size-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoRectify</h4>
- <pre>public static void fisheye_stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- double balance)</pre>
- <div class="block">Stereo rectification for fisheye camera model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K1</code> - First camera intrinsic matrix.</dd>
- <dd><code>D1</code> - First camera distortion parameters.</dd>
- <dd><code>K2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>D2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix between the coordinate systems of the first and the second
- cameras.</dd>
- <dd><code>tvec</code> - Translation vector between coordinate systems of the cameras.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. If the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>newImageSize</code> - New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
- preserve details in the original image, especially when there is a large radial distortion.</dd>
- <dd><code>balance</code> - Sets the new focal length in range between the min focal length and the max focal
- length. Balance is in range of [0, 1].</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.Size-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_stereoRectify</h4>
- <pre>public static void fisheye_stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- double balance,
- double fov_scale)</pre>
- <div class="block">Stereo rectification for fisheye camera model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>K1</code> - First camera intrinsic matrix.</dd>
- <dd><code>D1</code> - First camera distortion parameters.</dd>
- <dd><code>K2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>D2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix between the coordinate systems of the first and the second
- cameras.</dd>
- <dd><code>tvec</code> - Translation vector between coordinate systems of the cameras.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. If the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>newImageSize</code> - New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
- preserve details in the original image, especially when there is a large radial distortion.</dd>
- <dd><code>balance</code> - Sets the new focal length in range between the min focal length and the max focal
- length. Balance is in range of [0, 1].</dd>
- <dd><code>fov_scale</code> - Divisor for new focal length.</dd>
- </dl>
- </li>
- </ul>
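- <div class="block">The sketch below shows how the R and T obtained from fisheye_stereoCalibrate are typically fed into
- the full overload above, and then into #fisheye::initUndistortRectifyMap and #remap to produce a rectified pair. It is an
- illustrative sketch only: the class name <code>org.opencv.calib3d.Calib3d</code>, the constant
- <code>fisheye_CALIB_ZERO_DISPARITY</code>, and the <code>fisheye_initUndistortRectifyMap</code> binding are assumed to
- match the stock Java bindings.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.Size;
- import org.opencv.imgproc.Imgproc;
-
- public class FisheyeStereoRectifyExample {
-     // K1/D1/K2/D2, R and T come from a previous fisheye_stereoCalibrate run.
-     static Mat[] rectifyPair(Mat left, Mat right, Mat K1, Mat D1, Mat K2, Mat D2,
-                              Size imageSize, Mat R, Mat T) {
-         Mat R1 = new Mat(), R2 = new Mat();   // rectification rotations
-         Mat P1 = new Mat(), P2 = new Mat();   // rectified projection matrices
-         Mat Q = new Mat();                    // disparity-to-depth mapping
-
-         Calib3d.fisheye_stereoRectify(K1, D1, K2, D2, imageSize, R, T,
-                 R1, R2, P1, P2, Q,
-                 Calib3d.fisheye_CALIB_ZERO_DISPARITY,  // assumed Java-binding name
-                 imageSize,   // newImageSize: keep the original resolution
-                 0.0,         // balance in [0, 1]
-                 1.0);        // fov_scale: divisor for the new focal length
-
-         // Build per-camera undistort/rectify maps and remap the input images.
-         Mat map1x = new Mat(), map1y = new Mat(), map2x = new Mat(), map2y = new Mat();
-         Calib3d.fisheye_initUndistortRectifyMap(K1, D1, R1, P1, imageSize,
-                 CvType.CV_32FC1, map1x, map1y);
-         Calib3d.fisheye_initUndistortRectifyMap(K2, D2, R2, P2, imageSize,
-                 CvType.CV_32FC1, map2x, map2y);
-
-         Mat leftRect = new Mat(), rightRect = new Mat();
-         Imgproc.remap(left, leftRect, map1x, map1y, Imgproc.INTER_LINEAR);
-         Imgproc.remap(right, rightRect, map2x, map2y, Imgproc.INTER_LINEAR);
-         return new Mat[] { leftRect, rightRect };
-     }
- }
- </pre>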
- <a name="fisheye_undistortImage-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortImage</h4>
- <pre>public static void fisheye_undistortImage(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</pre>
- <div class="block">Transforms an image to compensate for fisheye lens distortion.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - image with fisheye lens distortion.</dd>
- <dd><code>undistorted</code> - Output image with compensated fisheye lens distortion.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).
- The function transforms an image to compensate for radial and tangential lens distortion.
- The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- (with bilinear interpolation). See the former function for details of the transformation being
- performed.
- See below the results of undistortImage.
- <ul>
- <li>
- a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- k_4, k_5, k_6) of distortion were optimized under calibration)
- </li>
- <li>
- b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- k_3, k_4) of fisheye distortion were optimized under calibration)
- </li>
- <li>
- c\) original image was captured with a fisheye lens
- </li>
- </ul>
- Pictures a) and b) are almost the same. But if we consider points of the image located far from the
- center of the image, we can notice that on image a) these points are distorted.
- </dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_undistortImage-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortImage</h4>
- <pre>public static void fisheye_undistortImage(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Knew)</pre>
- <div class="block">Transforms an image to compensate for fisheye lens distortion.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - image with fisheye lens distortion.</dd>
- <dd><code>undistorted</code> - Output image with compensated fisheye lens distortion.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>Knew</code> - Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
- may additionally scale and shift the result by using a different matrix.
- The function transforms an image to compensate for radial and tangential lens distortion.
- The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- (with bilinear interpolation). See the former function for details of the transformation being
- performed.
- See below the results of undistortImage.
- <ul>
- <li>
- a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- k_4, k_5, k_6) of distortion were optimized under calibration)
- </li>
- <li>
- b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- k_3, k_4) of fisheye distortion were optimized under calibration)
- </li>
- <li>
- c\) original image was captured with a fisheye lens
- </li>
- </ul>
- Pictures a) and b) are almost the same. But if we consider points of the image located far from the
- center of the image, we can notice that on image a) these points are distorted.
- </dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_undistortImage-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortImage</h4>
- <pre>public static void fisheye_undistortImage(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Knew,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> new_size)</pre>
- <div class="block">Transforms an image to compensate for fisheye lens distortion.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - image with fisheye lens distortion.</dd>
- <dd><code>undistorted</code> - Output image with compensated fisheye lens distortion.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>Knew</code> - Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
- may additionally scale and shift the result by using a different matrix.</dd>
- <dd><code>new_size</code> - the new size of the output (undistorted) image.
- The function transforms an image to compensate for radial and tangential lens distortion.
- The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- (with bilinear interpolation). See the former function for details of the transformation being
- performed.
- See below the results of undistortImage.
- <ul>
- <li>
- a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- k_4, k_5, k_6) of distortion were optimized under calibration)
- </li>
- <li>
- b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- k_3, k_4) of fisheye distortion were optimized under calibration)
- </li>
- <li>
- c\) original image was captured with a fisheye lens
- </li>
- </ul>
- Pictures a) and b) are almost the same. But if we consider points of the image located far from the
- center of the image, we can notice that on image a) these points are distorted.
- </dd>
- </dl>
- </li>
- </ul>
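- <div class="block">A minimal sketch of undistorting a single fisheye image with the full overload above. The enclosing
- class is assumed to be <code>org.opencv.calib3d.Calib3d</code>; passing K again as Knew keeps roughly the original field of
- view, while a scaled copy of K can be used to zoom in or out.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
- import org.opencv.imgcodecs.Imgcodecs;
-
- public class FisheyeUndistortImageExample {
-     // K and D come from a previous fisheye calibration (K: 3x3, D: 4 coefficients).
-     static Mat undistort(String path, Mat K, Mat D) {
-         Mat distorted = Imgcodecs.imread(path);
-         Mat undistorted = new Mat();
-         // Reusing K as Knew roughly preserves the original field of view; the output
-         // is produced at the same resolution as the input.
-         Calib3d.fisheye_undistortImage(distorted, undistorted, K, D, K, distorted.size());
-         return undistorted;
-     }
- }
- </pre>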
- <a name="fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortPoints</h4>
- <pre>public static void fisheye_undistortPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D)</pre>
- <div class="block">Undistorts 2D points using fisheye model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
- number of points in the view.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>undistorted</code> - Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortPoints</h4>
- <pre>public static void fisheye_undistortPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R)</pre>
- <div class="block">Undistorts 2D points using fisheye model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
- number of points in the view.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>undistorted</code> - Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortPoints</h4>
- <pre>public static void fisheye_undistortPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P)</pre>
- <div class="block">Undistorts 2D points using fisheye model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
- number of points in the view.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4)</dd>
- <dd><code>undistorted</code> - Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .</dd>
- </dl>
- </li>
- </ul>
- <a name="fisheye_undistortPoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>fisheye_undistortPoints</h4>
- <pre>public static void fisheye_undistortPoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> undistorted,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> K,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> D,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Undistorts 2D points using fisheye model</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>distorted</code> - Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
- number of points in the view.</dd>
- <dd><code>K</code> - Camera intrinsic matrix \(cameramatrix{K}\).</dd>
- <dd><code>D</code> - Input vector of distortion coefficients \(\distcoeffsfisheye\).</dd>
- <dd><code>R</code> - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel</dd>
- <dd><code>P</code> - New camera intrinsic matrix (3x3) or new projection matrix (3x4)</dd>
- <dd><code>criteria</code> - Termination criteria</dd>
- <dd><code>undistorted</code> - Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .</dd>
- </dl>
- </li>
- </ul>
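- <div class="block">A minimal sketch of mapping distorted pixel coordinates to undistorted pixel coordinates with the full
- overload above. The class name <code>org.opencv.calib3d.Calib3d</code> is assumed; passing K again as P is expected to
- yield pixel coordinates in the undistorted image, whereas omitting R and P yields normalized image coordinates.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.TermCriteria;
-
- public class FisheyeUndistortPointsExample {
-     // distortedPixels: 1xN/Nx1 2-channel points in the distorted image (e.g. MatOfPoint2f).
-     static Mat undistortPixels(MatOfPoint2f distortedPixels, Mat K, Mat D) {
-         Mat undistorted = new Mat();             // filled with 1xN/Nx1 2-channel points
-         Mat R = Mat.eye(3, 3, CvType.CV_64F);    // no additional rectification
-         Calib3d.fisheye_undistortPoints(distortedPixels, undistorted, K, D, R, K,
-                 new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 10, 1e-8));
-         return undistorted;
-     }
- }
- </pre>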
- <a name="getDefaultNewCameraMatrix-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getDefaultNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getDefaultNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix)</pre>
- <div class="block">Returns the default new camera matrix.
- The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
- centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
- In the latter case, the new camera matrix will be:
- \(\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\)
- where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
- By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
- move the principal point. However, when you work with stereo, it is important to move the principal
- points in both views to the same y-coordinate (which is required by most of stereo correspondence
- algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
- each view where the principal points are located at the center.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera matrix.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="getDefaultNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getDefaultNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getDefaultNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgsize)</pre>
- <div class="block">Returns the default new camera matrix.
- The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
- centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
- In the latter case, the new camera matrix will be:
- \(\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\)
- where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
- By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
- move the principal point. However, when you work with stereo, it is important to move the principal
- points in both views to the same y-coordinate (which is required by most of stereo correspondence
- algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
- each view where the principal points are located at the center.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera matrix.</dd>
- <dd><code>imgsize</code> - Camera view image size in pixels.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="getDefaultNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Size-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getDefaultNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getDefaultNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgsize,
- boolean centerPrincipalPoint)</pre>
- <div class="block">Returns the default new camera matrix.
- The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
- centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
- In the latter case, the new camera matrix will be:
- \(\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\)
- where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
- By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
- move the principal point. However, when you work with stereo, it is important to move the principal
- points in both views to the same y-coordinate (which is required by most of stereo correspondence
- algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
- each view where the principal points are located at the center.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera matrix.</dd>
- <dd><code>imgsize</code> - Camera view image size in pixels.</dd>
- <dd><code>centerPrincipalPoint</code> - Location of the principal point in the new camera matrix. The
- parameter indicates whether this location should be at the image center or not.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
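- <div class="block">A minimal sketch of the stereo-oriented use case described above: build a new camera matrix with the
- principal point moved to the image center and undistort with it. It assumes #undistort is exposed as a static method of
- the same class ( <code>org.opencv.calib3d.Calib3d</code> in the 4.x Java bindings).</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
-
- public class DefaultNewCameraMatrixExample {
-     // Undistort an image while forcing the principal point to the image center,
-     // e.g. so that both views of a stereo pair share the same principal point.
-     static Mat undistortCentered(Mat src, Mat cameraMatrix, Mat distCoeffs) {
-         Mat newK = Calib3d.getDefaultNewCameraMatrix(cameraMatrix, src.size(), true);
-         Mat dst = new Mat();
-         Calib3d.undistort(src, dst, cameraMatrix, distCoeffs, newK);  // assumed same class
-         return dst;
-     }
- }
- </pre>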
- <a name="getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getOptimalNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getOptimalNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha)</pre>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix.</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>imageSize</code> - Original image size.</dd>
- <dd><code>alpha</code> - Free scaling parameter between 0 (when all the pixels in the undistorted image are
- valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- #stereoRectify for details.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>new_camera_matrix Output new camera intrinsic matrix.
- The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
- image pixels if there is valuable information in the corners (alpha=1), or get something in between.
- When alpha>0 , the undistorted result is likely to have some black pixels corresponding to
- "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- #initUndistortRectifyMap to produce the maps for #remap .</dd>
- </dl>
- </li>
- </ul>
- <a name="getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getOptimalNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getOptimalNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize)</pre>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix.</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>imageSize</code> - Original image size.</dd>
- <dd><code>alpha</code> - Free scaling parameter between 0 (when all the pixels in the undistorted image are
- valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- #stereoRectify for details.</dd>
- <dd><code>newImgSize</code> - Image size after rectification. By default, it is set to imageSize .</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>new_camera_matrix Output new camera intrinsic matrix.
- The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
- image pixels if there is valuable information in the corners (alpha=1), or get something in between.
- When alpha>0 , the undistorted result is likely to have some black pixels corresponding to
- "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- #initUndistortRectifyMap to produce the maps for #remap .</dd>
- </dl>
- </li>
- </ul>
- <a name="getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-org.opencv.core.Size-org.opencv.core.Rect-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getOptimalNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getOptimalNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI)</pre>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix.</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>imageSize</code> - Original image size.</dd>
- <dd><code>alpha</code> - Free scaling parameter between 0 (when all the pixels in the undistorted image are
- valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- #stereoRectify for details.</dd>
- <dd><code>newImgSize</code> - Image size after rectification. By default, it is set to imageSize .</dd>
- <dd><code>validPixROI</code> - Optional output rectangle that outlines all-good-pixels region in the
- undistorted image. See roi1, roi2 description in #stereoRectify .</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>new_camera_matrix Output new camera intrinsic matrix.
- The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
- image pixels if there is valuable information in the corners (alpha=1), or get something in between.
- When alpha>0 , the undistorted result is likely to have some black pixels corresponding to
- "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
- #initUndistortRectifyMap to produce the maps for #remap .</dd>
- </dl>
- </li>
- </ul>
- <a name="getOptimalNewCameraMatrix-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-double-org.opencv.core.Size-org.opencv.core.Rect-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getOptimalNewCameraMatrix</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> getOptimalNewCameraMatrix(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI,
- boolean centerPrincipalPoint)</pre>
- <div class="block">Returns the new camera intrinsic matrix based on the free scaling parameter.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix.</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>imageSize</code> - Original image size.</dd>
- <dd><code>alpha</code> - Free scaling parameter between 0 (when all the pixels in the undistorted image are
- valid) and 1 (when all the source image pixels are retained in the undistorted image). See
- #stereoRectify for details.</dd>
- <dd><code>newImgSize</code> - Image size after rectification. By default, it is set to imageSize .</dd>
- <dd><code>validPixROI</code> - Optional output rectangle that outlines all-good-pixels region in the
- undistorted image. See roi1, roi2 description in #stereoRectify .</dd>
- <dd><code>centerPrincipalPoint</code> - Optional flag that indicates whether in the new camera intrinsic matrix the
- principal point should be at the image center or not. By default, the principal point is chosen to
- best fit a subset of the source image (determined by alpha) to the corrected image.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>new_camera_matrix Output new camera intrinsic matrix.
- The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
- By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
- image pixels if there is valuable information in the corners (alpha=1), or get something in between.
- When alpha>0, the undistorted result is likely to have some black pixels corresponding to
- "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
- coefficients, the computed new camera intrinsic matrix, and newImgSize should be passed to
- #initUndistortRectifyMap to produce the maps for #remap .</dd>
- </dl>
- </li>
- </ul>
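- <div class="block">A minimal Java sketch of the pipeline suggested by the return-value description above: compute the new intrinsic matrix, build the undistortion maps with #initUndistortRectifyMap, remap, and crop to validPixROI. The intrinsics, distortion coefficients and input image below are placeholders.
- <code>
- // uses org.opencv.calib3d.Calib3d, org.opencv.core.*, org.opencv.imgcodecs.Imgcodecs, org.opencv.imgproc.Imgproc
- Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);     // placeholder: use the calibrated intrinsics
- Mat distCoeffs   = Mat.zeros(5, 1, CvType.CV_64F);   // placeholder: use the calibrated coefficients
- Mat distorted    = Imgcodecs.imread("input.png");    // placeholder input image
- Size imageSize   = distorted.size();
- Rect validPixROI = new Rect();
- // alpha = 1 keeps all source pixels; alpha = 0 keeps only pixels without black borders
- Mat newK = Calib3d.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize,
-                                              1.0, imageSize, validPixROI, false);
- Mat map1 = new Mat(), map2 = new Mat();
- Calib3d.initUndistortRectifyMap(cameraMatrix, distCoeffs, new Mat(), newK,
-                                 imageSize, CvType.CV_32FC1, map1, map2);
- Mat undistorted = new Mat();
- Imgproc.remap(distorted, undistorted, map1, map2, Imgproc.INTER_LINEAR);
- Mat goodRegion = new Mat(undistorted, validPixROI);  // keep only the all-good-pixels region
- </code></div>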
- <a name="getValidDisparityROI-org.opencv.core.Rect-org.opencv.core.Rect-int-int-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>getValidDisparityROI</h4>
- <pre>public static <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> getValidDisparityROI(<a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi1,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi2,
- int minDisparity,
- int numberOfDisparities,
- int blockSize)</pre>
- </li>
- </ul>
- <a name="initCameraMatrix2D-java.util.List-java.util.List-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>initCameraMatrix2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> initCameraMatrix2D(java.util.List<<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize)</pre>
- <div class="block">Finds an initial camera intrinsic matrix from 3D-2D point correspondences.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points in the calibration pattern
- coordinate space. In the old interface all the per-view vectors are concatenated. See
- #calibrateCamera for details.</dd>
- <dd><code>imagePoints</code> - Vector of vectors of the projections of the calibration pattern points. In the
- old interface all the per-view vectors are concatenated.</dd>
- <dd><code>imageSize</code> - Image size in pixels used to initialize the principal point.
- When an aspect ratio is specified (see the overload below), \(f_x = f_y \cdot \texttt{aspectRatio}\) .
- The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
- Currently, the function only supports planar calibration patterns, which are patterns where each
- object point has z-coordinate =0.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="initCameraMatrix2D-java.util.List-java.util.List-org.opencv.core.Size-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>initCameraMatrix2D</h4>
- <pre>public static <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> initCameraMatrix2D(java.util.List<<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a>> imagePoints,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- double aspectRatio)</pre>
- <div class="block">Finds an initial camera intrinsic matrix from 3D-2D point correspondences.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points in the calibration pattern
- coordinate space. In the old interface all the per-view vectors are concatenated. See
- #calibrateCamera for details.</dd>
- <dd><code>imagePoints</code> - Vector of vectors of the projections of the calibration pattern points. In the
- old interface all the per-view vectors are concatenated.</dd>
- <dd><code>imageSize</code> - Image size in pixels used to initialize the principal point.</dd>
- <dd><code>aspectRatio</code> - If it is zero or negative, both \(f_x\) and \(f_y\) are estimated independently.
- Otherwise, \(f_x = f_y \cdot \texttt{aspectRatio}\) .
- The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
- Currently, the function only supports planar calibration patterns, which are patterns where each
- object point has z-coordinate =0.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
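- <div class="block">A short Java sketch of calling this function. The pattern and image points below are synthetic placeholders; a real calibration would use many detected pattern corners per view, with the planar pattern constraint (z = 0) described above.
- <code>
- java.util.List<MatOfPoint3f> objectPoints = new java.util.ArrayList<>();
- java.util.List<MatOfPoint2f> imagePoints  = new java.util.ArrayList<>();
- MatOfPoint3f pattern = new MatOfPoint3f(               // planar pattern: z = 0
-         new Point3(0, 0, 0), new Point3(1, 0, 0),
-         new Point3(0, 1, 0), new Point3(1, 1, 0));
- MatOfPoint2f observed = new MatOfPoint2f(              // placeholder detections for one view
-         new Point(100, 100), new Point(200, 100),
-         new Point(100, 200), new Point(200, 200));
- objectPoints.add(pattern);
- imagePoints.add(observed);
- Size imageSize = new Size(640, 480);
- // aspectRatio > 0 enforces f_x = f_y * aspectRatio (here 1.0)
- Mat K0 = Calib3d.initCameraMatrix2D(objectPoints, imagePoints, imageSize, 1.0);
- </code></div>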
- <a name="initInverseRectificationMap-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>initInverseRectificationMap</h4>
- <pre>public static void initInverseRectificationMap(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newCameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size,
- int m1type,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map2)</pre>
- <div class="block">Computes the projection and inverse-rectification transformation map. In essense, this is the inverse of
- #initUndistortRectifyMap to accomodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
- The function computes the joint projection and inverse rectification transformation and represents the
- result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
- once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
- is usually equal to cameraMatrix, or it can be computed by
- #getOptimalNewCameraMatrix for a better control over scaling. In case of a projector-camera pair,
- newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
- The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
- this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
- allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
- The function builds the maps for the inverse mapping algorithm that is used by #remap. That
- is, for each pixel \((u, v)\) in the destination (projected and inverse-rectified) image, the function
- computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
- \(
- \begin{array}{l}
- \text{newCameraMatrix}\\
- x \leftarrow (u - {c'}_x)/{f'}_x \\
- y \leftarrow (v - {c'}_y)/{f'}_y \\
- \\\text{Undistortion}
- \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
- r^2 \leftarrow x^2 + y^2 \\
- \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
- x' \leftarrow \frac{x}{\theta} \\
- y' \leftarrow \frac{y}{\theta} \\
- \\\text{Rectification}\\
- {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- x'' \leftarrow X/W \\
- y'' \leftarrow Y/W \\
- \\\text{cameraMatrix}\\
- map_x(u,v) \leftarrow x'' f_x + c_x \\
- map_y(u,v) \leftarrow y'' f_y + c_y
- \end{array}
- \)
- where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- are the distortion coefficients vector distCoeffs.
- In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
- This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
- is not calibrated, it is still possible to compute the rectification transformations directly from
- the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
- homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
- space. R can be computed from H as
- \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
- where cameraMatrix can be chosen arbitrarily.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>R</code> - Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
- computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
- is assumed.</dd>
- <dd><code>newCameraMatrix</code> - New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).</dd>
- <dd><code>size</code> - Distorted image size.</dd>
- <dd><code>m1type</code> - Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps</dd>
- <dd><code>map1</code> - The first output map for #remap.</dd>
- <dd><code>map2</code> - The second output map for #remap.</dd>
- </dl>
- </li>
- </ul>
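- <div class="block">A Java sketch for the projector-camera use case described above: build the inverse-rectification maps for the projector and warp the image that will be projected. The projector intrinsics, rectification rotation and new camera matrix are placeholders that would normally come from a projector-camera calibration and #stereoRectify.
- <code>
- Mat projK    = Mat.eye(3, 3, CvType.CV_64F);       // placeholder projector intrinsics
- Mat projDist = Mat.zeros(5, 1, CvType.CV_64F);     // placeholder distortion coefficients
- Mat R1 = Mat.eye(3, 3, CvType.CV_64F);             // placeholder for R1/R2 from stereoRectify
- Mat P1 = projK.clone();                            // placeholder for P1/P2 from stereoRectify
- Size projSize = new Size(1280, 800);
- Mat map1 = new Mat(), map2 = new Mat();
- Calib3d.initInverseRectificationMap(projK, projDist, R1, P1, projSize,
-                                     CvType.CV_32FC1, map1, map2);
- Mat pattern = Imgcodecs.imread("pattern.png");     // placeholder image to be projected
- Mat toProject = new Mat();
- Imgproc.remap(pattern, toProject, map1, map2, Imgproc.INTER_LINEAR);
- </code></div>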
- <a name="initUndistortRectifyMap-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-int-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>initUndistortRectifyMap</h4>
- <pre>public static void initUndistortRectifyMap(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newCameraMatrix,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> size,
- int m1type,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> map2)</pre>
- <div class="block">Computes the undistortion and rectification transformation map.
- The function computes the joint undistortion and rectification transformation and represents the
- result in the form of maps for #remap. The undistorted image looks like original, as if it is
- captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a
- monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
- #getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
- newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
- Also, this new camera is oriented differently in the coordinate space, according to R. That, for
- example, helps to align two heads of a stereo camera so that the epipolar lines on both images
- become horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera).
- The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
- is, for each pixel \((u, v)\) in the destination (corrected and rectified) image, the function
- computes the corresponding coordinates in the source image (that is, in the original image from
- camera). The following process is applied:
- \(
- \begin{array}{l}
- x \leftarrow (u - {c'}_x)/{f'}_x \\
- y \leftarrow (v - {c'}_y)/{f'}_y \\
- {[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\
- x' \leftarrow X/W \\
- y' \leftarrow Y/W \\
- r^2 \leftarrow x'^2 + y'^2 \\
- x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
- + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\
- y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
- + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
- s\vecthree{x'''}{y'''}{1} =
- \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
- {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
- {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
- map_x(u,v) \leftarrow x''' f_x + c_x \\
- map_y(u,v) \leftarrow y''' f_y + c_y
- \end{array}
- \)
- where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- are the distortion coefficients.
- In case of a stereo camera, this function is called twice: once for each camera head, after
- #stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
- was not calibrated, it is still possible to compute the rectification transformations directly from
- the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
- homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
- space. R can be computed from H as
- \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
- where cameraMatrix can be chosen arbitrarily.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix</code> - Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>R</code> - Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
- computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
- is assumed. In #initUndistortRectifyMap R is assumed to be an identity matrix.</dd>
- <dd><code>newCameraMatrix</code> - New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).</dd>
- <dd><code>size</code> - Undistorted image size.</dd>
- <dd><code>m1type</code> - Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps</dd>
- <dd><code>map1</code> - The first output map.</dd>
- <dd><code>map2</code> - The second output map.</dd>
- </dl>
- </li>
- </ul>
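- <div class="block">A Java sketch of the stereo case described above: the maps are built once per camera head (with R1/P1 and R2/P2 from #stereoRectify) and then reused with #remap for every frame. All calibration data and frames below are placeholders.
- <code>
- Mat K1 = Mat.eye(3, 3, CvType.CV_64F), D1 = Mat.zeros(5, 1, CvType.CV_64F);  // placeholder left intrinsics
- Mat K2 = Mat.eye(3, 3, CvType.CV_64F), D2 = Mat.zeros(5, 1, CvType.CV_64F);  // placeholder right intrinsics
- Mat R1 = Mat.eye(3, 3, CvType.CV_64F), R2 = Mat.eye(3, 3, CvType.CV_64F);    // placeholders for stereoRectify R1/R2
- Mat P1 = K1.clone(), P2 = K2.clone();                                        // placeholders for stereoRectify P1/P2
- Size imageSize = new Size(640, 480);
- Mat map1x = new Mat(), map1y = new Mat(), map2x = new Mat(), map2y = new Mat();
- Calib3d.initUndistortRectifyMap(K1, D1, R1, P1, imageSize, CvType.CV_16SC2, map1x, map1y);
- Calib3d.initUndistortRectifyMap(K2, D2, R2, P2, imageSize, CvType.CV_16SC2, map2x, map2y);
- Mat leftFrame = Imgcodecs.imread("left.png"), rightFrame = Imgcodecs.imread("right.png");  // placeholder frames
- Mat rectLeft = new Mat(), rectRight = new Mat();
- Imgproc.remap(leftFrame,  rectLeft,  map1x, map1y, Imgproc.INTER_LINEAR);
- Imgproc.remap(rightFrame, rectRight, map2x, map2y, Imgproc.INTER_LINEAR);
- </code></div>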
- <a name="matMulDeriv-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>matMulDeriv</h4>
- <pre>public static void matMulDeriv(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> A,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> B,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dABdA,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dABdB)</pre>
- <div class="block">Computes partial derivatives of the matrix product for each multiplied matrix.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>A</code> - First multiplied matrix.</dd>
- <dd><code>B</code> - Second multiplied matrix.</dd>
- <dd><code>dABdA</code> - First output derivative matrix d(A\*B)/dA of size
- \(\texttt{A.rows*B.cols} \times {A.rows*A.cols}\) .</dd>
- <dd><code>dABdB</code> - Second output derivative matrix d(A\*B)/dB of size
- \(\texttt{A.rows*B.cols} \times {B.rows*B.cols}\) .
- The function computes partial derivatives of the elements of the matrix product \(A*B\) with regard to
- the elements of each of the two input matrices. The function is used to compute the Jacobian
- matrices in #stereoCalibrate but can also be used in any other similar optimization function.</dd>
- </dl>
- </li>
- </ul>
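- <div class="block">A small Java sketch illustrating the output sizes stated above for a 2x3 by 3x4 product.
- <code>
- Mat A = Mat.ones(2, 3, CvType.CV_64F);
- Mat B = Mat.ones(3, 4, CvType.CV_64F);
- Mat dABdA = new Mat(), dABdB = new Mat();
- Calib3d.matMulDeriv(A, B, dABdA, dABdB);
- // Per the size formulas above: dABdA is (2*4) x (2*3) = 8x6, dABdB is (2*4) x (3*4) = 8x12
- System.out.println(dABdA.rows() + "x" + dABdA.cols() + ", " + dABdB.rows() + "x" + dABdB.cols());
- </code></div>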
- <a name="projectPoints-org.opencv.core.MatOfPoint3f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.MatOfPoint2f-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>projectPoints</h4>
- <pre>public static void projectPoints(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints)</pre>
- <div class="block">Projects 3D points to an image plane.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
- 1-channel or 1xN/Nx1 3-channel (or vector<Point3f> ), where N is the number of points in the view.</dd>
- <dd><code>rvec</code> - The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
- basis from world to camera coordinate system, see REF: calibrateCamera for details.</dd>
- <dd><code>tvec</code> - The translation vector, see parameter description above.</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>imagePoints</code> - Output array of image points, 1xN/Nx1 2-channel, or
- vector<Point2f> .
- The overloads below can additionally return a jacobian matrix of derivatives of the image
- points with respect to components of the rotation vector, translation vector, focal lengths,
- coordinates of the principal point and the distortion coefficients. In the old interface different
- components of the jacobian are returned via different output parameters. With a non-zero aspectRatio, the
- function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
- jacobian matrix.
- The function computes the 2D projections of 3D points to the image plane, given intrinsic and
- extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
- derivatives of image points coordinates (as functions of all the input parameters) with respect to
- the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
- optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
- can also be used to compute a re-projection error, given the current intrinsic and extrinsic
- parameters.
- <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
- or by passing zero distortion coefficients, one can get various useful partial cases of the
- function. This means, one can compute the distorted coordinates for a sparse set of points or apply
- a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.</dd>
- </dl>
- </li>
- </ul>
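- <div class="block">A Java sketch of the re-projection-error use mentioned above: project known 3D points with a given pose and compare them with measured image points. The pose, intrinsics and measured points are placeholders (they would typically come from #solvePnP or #calibrateCamera and a feature detector).
- <code>
- MatOfPoint3f objectPts = new MatOfPoint3f(
-         new Point3(0, 0, 0), new Point3(0.1, 0, 0), new Point3(0, 0.1, 0));
- Mat rvec = Mat.zeros(3, 1, CvType.CV_64F);         // placeholder rotation (no rotation)
- Mat tvec = Mat.zeros(3, 1, CvType.CV_64F);
- tvec.put(2, 0, 1.0);                               // placeholder translation: 1 unit in front of the camera
- Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);   // placeholder intrinsics
- MatOfDouble distCoeffs = new MatOfDouble();        // empty vector: zero distortion assumed
- MatOfPoint2f projected = new MatOfPoint2f();
- Calib3d.projectPoints(objectPts, rvec, tvec, cameraMatrix, distCoeffs, projected);
- MatOfPoint2f measured = new MatOfPoint2f(          // placeholder detections, consistent with the pose above
-         new Point(0, 0), new Point(0.1, 0), new Point(0, 0.1));
- double rms = Core.norm(projected, measured, Core.NORM_L2) / Math.sqrt(objectPts.total());
- // rms == 0 here because the placeholder detections match the projected points exactly
- </code></div>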
- <a name="projectPoints-org.opencv.core.MatOfPoint3f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>projectPoints</h4>
- <pre>public static void projectPoints(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian)</pre>
- <div class="block">Projects 3D points to an image plane.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
- 1-channel or 1xN/Nx1 3-channel (or vector<Point3f> ), where N is the number of points in the view.</dd>
- <dd><code>rvec</code> - The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
- basis from world to camera coordinate system, see REF: calibrateCamera for details.</dd>
- <dd><code>tvec</code> - The translation vector, see parameter description above.</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>imagePoints</code> - Output array of image points, 1xN/Nx1 2-channel, or
- vector<Point2f> .</dd>
- <dd><code>jacobian</code> - Optional output 2Nx(10+<numDistCoeffs>) jacobian matrix of derivatives of image
- points with respect to components of the rotation vector, translation vector, focal lengths,
- coordinates of the principal point and the distortion coefficients. In the old interface different
- components of the jacobian are returned via different output parameters.
- With a non-zero aspectRatio (see the overload below), the
- function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
- jacobian matrix.
- The function computes the 2D projections of 3D points to the image plane, given intrinsic and
- extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
- derivatives of image points coordinates (as functions of all the input parameters) with respect to
- the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
- optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
- can also be used to compute a re-projection error, given the current intrinsic and extrinsic
- parameters.
- <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
- or by passing zero distortion coefficients, one can get various useful partial cases of the
- function. This means, one can compute the distorted coordinates for a sparse set of points or apply
- a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.</dd>
- </dl>
- </li>
- </ul>
- <a name="projectPoints-org.opencv.core.MatOfPoint3f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>projectPoints</h4>
- <pre>public static void projectPoints(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian,
- double aspectRatio)</pre>
- <div class="block">Projects 3D points to an image plane.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
- 1-channel or 1xN/Nx1 3-channel (or vector<Point3f> ), where N is the number of points in the view.</dd>
- <dd><code>rvec</code> - The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
- basis from world to camera coordinate system, see REF: calibrateCamera for details.</dd>
- <dd><code>tvec</code> - The translation vector, see parameter description above.</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>imagePoints</code> - Output array of image points, 1xN/Nx1 2-channel, or
- vector<Point2f> .</dd>
- <dd><code>jacobian</code> - Optional output 2Nx(10+<numDistCoeffs>) jacobian matrix of derivatives of image
- points with respect to components of the rotation vector, translation vector, focal lengths,
- coordinates of the principal point and the distortion coefficients. In the old interface different
- components of the jacobian are returned via different output parameters.</dd>
- <dd><code>aspectRatio</code> - Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
- function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
- jacobian matrix.
- The function computes the 2D projections of 3D points to the image plane, given intrinsic and
- extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
- derivatives of image points coordinates (as functions of all the input parameters) with respect to
- the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
- optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
- can also be used to compute a re-projection error, given the current intrinsic and extrinsic
- parameters.
- <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
- or by passing zero distortion coefficients, one can get various useful partial cases of the
- function. This means, one can compute the distorted coordinates for a sparse set of points or apply
- a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.
- This overload assumes that points1 and points2
- are feature points from cameras with the same focal length and principal point. In the overloads below, an optional mask marks
- inliers in points1 and points2 for the given essential matrix E; only these inliers will be used to
- recover pose, and in the output mask only inliers which pass the chirality check are kept.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double focal)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>focal</code> - Focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with the same focal length and principal point. In the overloads below, an optional mask marks
- inliers in points1 and points2 for the given essential matrix E; only these inliers will be used to
- recover pose, and in the output mask only inliers which pass the chirality check are kept.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>focal</code> - Focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera. In the overload below, an optional mask marks
- inliers in points1 and points2 for the given essential matrix E; only these inliers will be used to
- recover pose, and in the output mask only inliers which pass the chirality check are kept.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Point-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double focal,
- <a href="../../../org/opencv/core/Point.html" title="class in org.opencv.core">Point</a> pp,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>focal</code> - Focal length of the camera. Note that this function assumes that points1 and points2
- are feature points from cameras with same focal length and principal point.</dd>
- <dd><code>pp</code> - principal point of the camera.</dd>
- <dd><code>mask</code> - Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- recover pose. In the output mask, only inliers which pass the chirality check are kept.
- This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
- principal point:
- \(A =
- \begin{bmatrix}
- f & 0 & x_{pp} \\
- 0 & f & y_{pp} \\
- 0 & 0 & 1
- \end{bmatrix}\)</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from an estimated essential
- matrix and the corresponding points in two images, using chirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.
- In the overload below, an optional mask marks
- inliers in points1 and points2 for the given essential matrix E; only these inliers will be used to
- recover pose, and in the output mask only inliers which pass the chirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing the chirality check. The chirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for #findEssentialMat :
- <code>
- // Example. Estimation of the essential matrix using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // camera matrix with both focal lengths = 1, and principal point = (0, 0)
- Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
- Mat E, R, t, mask;
- E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
- recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
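- <div class="block">A Java counterpart of the snippet above, as a sketch: the matched points are placeholders, and #findEssentialMat is called through its minimal overload (default RANSAC parameters); the overloads exposing the method, confidence and threshold can be used instead.
- <code>
- Point[] pts1 = new Point[100];
- Point[] pts2 = new Point[100];
- // fill pts1 and pts2 with matched feature locations here ...
- MatOfPoint2f points1 = new MatOfPoint2f(pts1);
- MatOfPoint2f points2 = new MatOfPoint2f(pts2);
- // camera matrix with both focal lengths = 1, and principal point = (0, 0)
- Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
- Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix);
- Mat R = new Mat(), t = new Mat(), mask = new Mat();
- int inliers = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
- </code></div>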
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double distanceThresh)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1.</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>distanceThresh</code> - threshold distance which is used to filter out far away points (i.e. infinite
- points).
- In the overloads below, an optional mask marks
- inliers in points1 and points2 for the given essential matrix E; only these inliers will be used to
- recover pose, and in the output mask only inliers which pass the chirality check are kept.
- This function differs from the one above in that it outputs the triangulated 3D points that are used for
- the chirality check.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double distanceThresh,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1.</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>distanceThresh</code> - threshold distance which is used to filter out far away points (i.e. infinite
- points).</dd>
- <dd><code>mask</code> - Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- recover pose. In the output mask, only inliers which pass the chirality check are kept.
- This function differs from the one above in that it outputs the triangulated 3D points that are used for
- the chirality check.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- double distanceThresh,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> triangulatedPoints)</pre>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1.</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- description below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>distanceThresh</code> - threshold distance which is used to filter out far away points (i.e. infinite
- points).</dd>
- <dd><code>mask</code> - Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- recover pose. In the output mask, only inliers which pass the chirality check are kept.</dd>
- <dd><code>triangulatedPoints</code> - 3D points which were reconstructed by triangulation.
- This function differs from the one above in that it outputs the triangulated 3D points that are used for
- the chirality check.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from an estimated essential
- matrix and the corresponding points in two images, using chirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>E</code> - The input essential matrix.</dd>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix</code> - Camera intrinsic matrix \(\cameramatrix{A}\) .
- Note that this function assumes that points1 and points2 are feature points from cameras with the
- same camera intrinsic matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>mask</code> - Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- recover pose. In the output mask, only inliers which pass the chirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing the chirality check. The chirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for #findEssentialMat :
- <code>
- // Example. Estimation of the essential matrix using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // camera matrix with both focal lengths = 1, and principal point = (0, 0)
- Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
- Mat E, R, t, mask;
- E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
- recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera matrix for the second camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs2</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>E</code> - The output essential matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.
- In the overloads with additional parameters, the robust estimation method can be chosen from
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- together with the desired confidence (probability) that the estimated matrix is correct and the
- maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise. An optional mask marks
- inliers in points1 and points2 for the given essential matrix E; only these inliers will be used to
- recover pose, and in the output mask only inliers which pass the cheirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing cheirality check. The cheirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for findEssentialMat.:
- <code>
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- // Output: Essential matrix, relative rotation and relative translation.
- Mat E, R, t, mask;
- recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, RANSAC, 0.999, 1.0, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera matrix for the second camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs2</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>E</code> - The output essential matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul>
- The remaining parameters are described in the fuller overloads below. The confidence is a desirable level of
- probability that the estimated matrix is correct. The threshold is the maximum distance from a point to an
- epipolar line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix; it can be set to something like 1-3, depending on the accuracy of the point
- localization, image resolution, and the image noise. The mask marks inliers in points1 and points2 for the
- given essential matrix E. Only these inliers will be used to recover pose. In the output mask only inliers
- which pass the cheirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing cheirality check. The cheirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for findEssentialMat.:
- <code>
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- // Output: Essential matrix, relative rotation and relative translation.
- Mat E, R, t, mask;
- recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, RANSAC, 0.999, 1.0, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method,
- double prob)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera matrix for the second camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs2</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>E</code> - The output essential matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.
- The remaining parameters are described in the fuller overloads below. The threshold is the maximum distance
- from a point to an epipolar line in pixels, beyond which the point is considered an outlier and is not used
- for computing the final fundamental matrix; it can be set to something like 1-3, depending on the accuracy of
- the point localization, image resolution, and the image noise. The mask marks inliers in points1 and points2
- for the given essential matrix E. Only these inliers will be used to recover pose. In the output mask only
- inliers which pass the cheirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing cheirality check. The cheirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for findEssentialMat.:
- <code>
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- // Output: Essential matrix, relative rotation and relative translation.
- Mat E, R, t, mask;
- recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, RANSAC, 0.999, 1.0, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method,
- double prob,
- double threshold)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera matrix for the second camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs2</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>E</code> - The output essential matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.
- The mask, described in the fuller overload below, marks inliers in points1 and points2 for the given essential
- matrix E. Only these inliers will be used to recover pose. In the output mask only inliers which pass the
- cheirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing cheirality check. The cheirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for findEssentialMat.:
- <code>
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- // Output: Essential matrix, relative rotation and relative translation.
- Mat E, R, t, mask;
- recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, RANSAC, 0.999, 1.0, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="recoverPose-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>recoverPose</h4>
- <pre>public static int recoverPose(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> t,
- int method,
- double prob,
- double threshold,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mask)</pre>
- <div class="block">Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
- inliers that pass the check.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of N 2D points from the first image. The point coordinates should be
- floating-point (single or double precision).</dd>
- <dd><code>points2</code> - Array of the second image points of the same size and format as points1 .</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera matrix for the second camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs2</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>E</code> - The output essential matrix.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
- that performs a change of basis from the first camera's coordinate system to the second camera's
- coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
- described below.</dd>
- <dd><code>t</code> - Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
- therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
- length.</dd>
- <dd><code>method</code> - Method for computing an essential matrix.
- <ul>
- <li>
- REF: RANSAC for the RANSAC algorithm.
- </li>
- <li>
- REF: LMEDS for the LMedS algorithm.
- </li>
- </ul></dd>
- <dd><code>prob</code> - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
- confidence (probability) that the estimated matrix is correct.</dd>
- <dd><code>threshold</code> - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
- line in pixels, beyond which the point is considered an outlier and is not used for computing the
- final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
- point localization, image resolution, and the image noise.</dd>
- <dd><code>mask</code> - Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
- inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
- recover pose. In the output mask only inliers which pass the cheirality check are kept.
- This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
- possible pose hypotheses by doing cheirality check. The cheirality check means that the
- triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
- This function can be used to process the output E and mask from REF: findEssentialMat. In this
- scenario, points1 and points2 are the same input for findEssentialMat.:
- <code>
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
- // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
- // Output: Essential matrix, relative rotation and relative translation.
- Mat E, R, t, mask;
- recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, RANSAC, 0.999, 1.0, mask);
- </code></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
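- <div class="block">The following is a minimal Java sketch of calling the fullest overload above. It assumes the
- OpenCV native library has already been loaded and that <code>points1</code>/<code>points2</code> are Nx2
- floating-point Mats of matched keypoints; the class name and the RANSAC/0.999/1.0 values are illustrative only.</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
- class RecoverPoseSketch {
-     static int relativePose(Mat points1, Mat points2,
-                             Mat cameraMatrix1, Mat distCoeffs1,
-                             Mat cameraMatrix2, Mat distCoeffs2) {
-         Mat E = new Mat(), R = new Mat(), t = new Mat(), mask = new Mat();
-         // Estimate the essential matrix and recover R, t in one call;
-         // mask receives the inliers that pass the cheirality check.
-         int inliers = Calib3d.recoverPose(points1, points2,
-                 cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-                 E, R, t, Calib3d.RANSAC, 0.999, 1.0, mask);
-         return inliers; // number of correspondences passing the cheirality check
-     }
- }
- </code>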
- <a name="rectify3Collinear-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-double-org.opencv.core.Size-org.opencv.core.Rect-org.opencv.core.Rect-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>rectify3Collinear</h4>
- <pre>public static float rectify3Collinear(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs3,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imgpt1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imgpt3,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R12,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T12,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R13,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T13,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P3,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImgSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi1,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> roi2,
- int flags)</pre>
- </li>
- </ul>
- <a name="reprojectImageTo3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>reprojectImageTo3D</h4>
- <pre>public static void reprojectImageTo3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> _3dImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q)</pre>
- <div class="block">Reprojects a disparity image to 3D space.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>disparity</code> - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
- floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
- fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
- REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
- being used here.</dd>
- <dd><code>_3dImage</code> - Output 3-channel floating-point image of the same size as disparity. Each element of
- _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
- uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
- camera's rectified coordinate system.</dd>
- <dd><code>Q</code> - \(4 \times 4\) perspective transformation matrix that can be obtained with
- REF: stereoRectify.
- The fuller overloads below additionally take handleMissingValues, which indicates whether the function should
- handle missing values (i.e. points where the disparity was not computed); if it is true, pixels with the
- minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed to 3D points
- with a very large Z value (currently set to 10000). They also take ddepth, the optional output array depth,
- which can be set to CV_16S, CV_32S or CV_32F (the default of -1 gives CV_32F).
- The function transforms a single-channel disparity map to a 3-channel image representing a 3D
- surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
- computes:
- \(\begin{bmatrix}
- X \\
- Y \\
- Z \\
- W
- \end{bmatrix} = Q \begin{bmatrix}
- x \\
- y \\
- \texttt{disparity} (x,y) \\
- 1
- \end{bmatrix}.\)
- SEE:
- To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.</dd>
- </dl>
- </li>
- </ul>
- <a name="reprojectImageTo3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>reprojectImageTo3D</h4>
- <pre>public static void reprojectImageTo3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> _3dImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- boolean handleMissingValues)</pre>
- <div class="block">Reprojects a disparity image to 3D space.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>disparity</code> - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
- floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
- fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
- REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
- being used here.</dd>
- <dd><code>_3dImage</code> - Output 3-channel floating-point image of the same size as disparity. Each element of
- _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
- uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
- camera's rectified coordinate system.</dd>
- <dd><code>Q</code> - \(4 \times 4\) perspective transformation matrix that can be obtained with
- REF: stereoRectify.</dd>
- <dd><code>handleMissingValues</code> - Indicates whether the function should handle missing values (i.e.
- points where the disparity was not computed). If handleMissingValues=true, then pixels with the
- minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
- to 3D points with a very large Z value (currently set to 10000).
- The fuller overload below additionally takes ddepth, the optional output array depth, which can be set to
- CV_16S, CV_32S or CV_32F (the default of -1 gives CV_32F).
- The function transforms a single-channel disparity map to a 3-channel image representing a 3D
- surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
- computes:
- \(\begin{bmatrix}
- X \\
- Y \\
- Z \\
- W
- \end{bmatrix} = Q \begin{bmatrix}
- x \\
- y \\
- \texttt{disparity} (x,y) \\
- 1
- \end{bmatrix}.\)
- SEE:
- To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.</dd>
- </dl>
- </li>
- </ul>
- <a name="reprojectImageTo3D-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>reprojectImageTo3D</h4>
- <pre>public static void reprojectImageTo3D(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> _3dImage,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- boolean handleMissingValues,
- int ddepth)</pre>
- <div class="block">Reprojects a disparity image to 3D space.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>disparity</code> - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
- floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
- fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
- REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
- being used here.</dd>
- <dd><code>_3dImage</code> - Output 3-channel floating-point image of the same size as disparity. Each element of
- _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
- uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
- camera's rectified coordinate system.</dd>
- <dd><code>Q</code> - \(4 \times 4\) perspective transformation matrix that can be obtained with
- REF: stereoRectify.</dd>
- <dd><code>handleMissingValues</code> - Indicates whether the function should handle missing values (i.e.
- points where the disparity was not computed). If handleMissingValues=true, then pixels with the
- minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
- to 3D points with a very large Z value (currently set to 10000).</dd>
- <dd><code>ddepth</code> - The optional output array depth. If it is -1, the output image will have CV_32F
- depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
- The function transforms a single-channel disparity map to a 3-channel image representing a 3D
- surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
- computes:
- \(\begin{bmatrix}
- X \\
- Y \\
- Z \\
- W
- \end{bmatrix} = Q \begin{bmatrix}
- x \\
- y \\
- \texttt{disparity} (x,y) \\
- 1
- \end{bmatrix}.\)
- SEE:
- To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.</dd>
- </dl>
- </li>
- </ul>
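- <div class="block">A minimal Java sketch of producing a point cloud from a rectified stereo pair. It assumes the
- OpenCV native library is loaded, that the two input images are already rectified, and that <code>Q</code> comes
- from stereoRectify; the StereoBM parameters are arbitrary placeholders. The 16-bit fixed-point disparity is
- scaled by 1/16 before reprojection, as noted above.</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.calib3d.StereoBM;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- class ReprojectSketch {
-     // rectifiedLeft/rectifiedRight: 8-bit grayscale rectified images; Q: 4x4 matrix from stereoRectify.
-     static Mat pointCloud(Mat rectifiedLeft, Mat rectifiedRight, Mat Q) {
-         StereoBM matcher = StereoBM.create(64, 21);      // placeholder parameters
-         Mat disparity16 = new Mat();
-         matcher.compute(rectifiedLeft, rectifiedRight, disparity16);
-         Mat disparity = new Mat();
-         disparity16.convertTo(disparity, CvType.CV_32F, 1.0 / 16.0); // fixed-point -> float
-         Mat xyz = new Mat();
-         Calib3d.reprojectImageTo3D(disparity, xyz, Q, true); // handle missing disparities
-         return xyz; // 3-channel float image of 3D points in the first camera's rectified frame
-     }
- }
- </code>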
- <a name="Rodrigues-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>Rodrigues</h4>
- <pre>public static void Rodrigues(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst)</pre>
- <div class="block">Converts a rotation matrix to a rotation vector or vice versa.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).</dd>
- <dd><code>dst</code> - Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
- The fuller overload below can additionally return the Jacobian matrix of partial derivatives of the output
- array components with respect to the input array components.
- \(\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
- The inverse transformation can also be done easily, since
- \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
- A rotation vector is a convenient and most compact representation of a rotation matrix (since any
- rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
- optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
- <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
- can be found in:
- <ul>
- <li>
- A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
- </li>
- </ul>
- <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
- <ul>
- <li>
- A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
- </li>
- <li>
- Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
- </li>
- <li>
- A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
- </li>
- </ul></dd>
- </dl>
- </li>
- </ul>
- <a name="Rodrigues-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>Rodrigues</h4>
- <pre>public static void Rodrigues(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> jacobian)</pre>
- <div class="block">Converts a rotation matrix to a rotation vector or vice versa.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).</dd>
- <dd><code>dst</code> - Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.</dd>
- <dd><code>jacobian</code> - Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
- derivatives of the output array components with respect to the input array components.
- \(\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
- The inverse transformation can also be done easily, since
- \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
- A rotation vector is a convenient and most compact representation of a rotation matrix (since any
- rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
- optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
- <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
- can be found in:
- <ul>
- <li>
- A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
- </li>
- </ul>
- <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
- <ul>
- <li>
- A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
- </li>
- <li>
- Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
- </li>
- <li>
- A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
- </li>
- </ul></dd>
- </dl>
- </li>
- </ul>
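- <div class="block">A minimal, self-contained Java sketch of the round trip between a rotation vector and a
- rotation matrix; the axis/angle values are arbitrary.</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- public class RodriguesSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Rotation vector: axis (0, 0, 1) scaled by the angle pi/2.
-         Mat rvec = new Mat(3, 1, CvType.CV_64F);
-         rvec.put(0, 0, 0.0, 0.0, Math.PI / 2);
-         Mat R = new Mat();
-         Calib3d.Rodrigues(rvec, R);        // 3x1 vector -> 3x3 matrix
-         Mat rvecBack = new Mat();
-         Calib3d.Rodrigues(R, rvecBack);    // 3x3 matrix -> 3x1 vector again
-         System.out.println(R.dump());
-         System.out.println(rvecBack.dump());
-     }
- }
- </code>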
- <a name="RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>RQDecomp3x3</h4>
- <pre>public static double[] RQDecomp3x3(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ)</pre>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - 3x3 input matrix.</dd>
- <dd><code>mtxR</code> - Output 3x3 upper-triangular matrix.</dd>
- <dd><code>mtxQ</code> - Output 3x3 orthogonal matrix.
- The function computes an RQ decomposition using Givens rotations. This function is used in
- #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- and a rotation matrix.
- It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- sequence of rotations about the three principal axes that results in the same orientation of an
- object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
- are only one of the possible solutions.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>RQDecomp3x3</h4>
- <pre>public static double[] RQDecomp3x3(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qx)</pre>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - 3x3 input matrix.</dd>
- <dd><code>mtxR</code> - Output 3x3 upper-triangular matrix.</dd>
- <dd><code>mtxQ</code> - Output 3x3 orthogonal matrix.</dd>
- <dd><code>Qx</code> - Optional output 3x3 rotation matrix around x-axis.
- The function computes an RQ decomposition using Givens rotations. This function is used in
- #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- and a rotation matrix.
- It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- sequence of rotations about the three principal axes that results in the same orientation of an
- object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
- are only one of the possible solutions.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>RQDecomp3x3</h4>
- <pre>public static double[] RQDecomp3x3(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qx,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qy)</pre>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - 3x3 input matrix.</dd>
- <dd><code>mtxR</code> - Output 3x3 upper-triangular matrix.</dd>
- <dd><code>mtxQ</code> - Output 3x3 orthogonal matrix.</dd>
- <dd><code>Qx</code> - Optional output 3x3 rotation matrix around x-axis.</dd>
- <dd><code>Qy</code> - Optional output 3x3 rotation matrix around y-axis.
- The function computes an RQ decomposition using Givens rotations. This function is used in
- #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- and a rotation matrix.
- It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- sequence of rotations about the three principal axes that results in the same orientation of an
- object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
- are only one of the possible solutions.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="RQDecomp3x3-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>RQDecomp3x3</h4>
- <pre>public static double[] RQDecomp3x3(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxR,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> mtxQ,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qx,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qy,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Qz)</pre>
- <div class="block">Computes an RQ decomposition of 3x3 matrices.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - 3x3 input matrix.</dd>
- <dd><code>mtxR</code> - Output 3x3 upper-triangular matrix.</dd>
- <dd><code>mtxQ</code> - Output 3x3 orthogonal matrix.</dd>
- <dd><code>Qx</code> - Optional output 3x3 rotation matrix around x-axis.</dd>
- <dd><code>Qy</code> - Optional output 3x3 rotation matrix around y-axis.</dd>
- <dd><code>Qz</code> - Optional output 3x3 rotation matrix around z-axis.
- The function computes an RQ decomposition using Givens rotations. This function is used in
- #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
- and a rotation matrix.
- It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
- degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
- sequence of rotations about the three principal axes that results in the same orientation of an
- object, e.g. see CITE: Slabaugh . The returned three rotation matrices and corresponding three Euler angles
- are only one of the possible solutions.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
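- <div class="block">A minimal Java sketch: a rotation matrix is built from an arbitrary rotation vector and then
- decomposed; because the input is a pure rotation, <code>mtxR</code> comes out close to the identity and the
- returned Euler angles (in degrees) describe <code>mtxQ</code>. As noted above, they are only one of the possible
- solutions.</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- public class RQDecompSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         Mat rvec = new Mat(3, 1, CvType.CV_64F);
-         rvec.put(0, 0, 0.1, -0.2, 0.3);          // arbitrary rotation vector
-         Mat R = new Mat();
-         Calib3d.Rodrigues(rvec, R);
-         Mat mtxR = new Mat(), mtxQ = new Mat();
-         double[] eulerDeg = Calib3d.RQDecomp3x3(R, mtxR, mtxQ); // Euler angles returned in degrees
-         System.out.printf("x=%.2f y=%.2f z=%.2f%n", eulerDeg[0], eulerDeg[1], eulerDeg[2]);
-     }
- }
- </code>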
- <a name="sampsonDistance-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>sampsonDistance</h4>
- <pre>public static double sampsonDistance(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pt1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> pt2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F)</pre>
- <div class="block">Calculates the Sampson Distance between two points.
- The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as:
- \(
- sd( \texttt{pt1} , \texttt{pt2} )=
- \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
- {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
- ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
- ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
- ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
- \)
- The fundamental matrix may be calculated using the #findFundamentalMat function. See CITE: HartleyZ00 11.4.3 for details.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>pt1</code> - first homogeneous 2d point</dd>
- <dd><code>pt2</code> - second homogeneous 2d point</dd>
- <dd><code>F</code> - fundamental matrix</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>The computed Sampson distance.</dd>
- </dl>
- </li>
- </ul>
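- <div class="block">A minimal Java sketch of evaluating the Sampson distance for one correspondence. The point
- coordinates are arbitrary and the identity matrix is only a placeholder for a fundamental matrix that would
- normally come from #findFundamentalMat.</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- public class SampsonSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         // Homogeneous 2D points (x, y, 1) as 3x1 double Mats.
-         Mat pt1 = new Mat(3, 1, CvType.CV_64F);
-         pt1.put(0, 0, 100.0, 120.0, 1.0);
-         Mat pt2 = new Mat(3, 1, CvType.CV_64F);
-         pt2.put(0, 0, 103.0, 118.0, 1.0);
-         Mat F = Mat.eye(3, 3, CvType.CV_64F);    // placeholder fundamental matrix
-         double sd = Calib3d.sampsonDistance(pt1, pt2, F);
-         System.out.println("Sampson distance: " + sd);
-     }
- }
- </code>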
- <a name="solveP3P-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solveP3P</h4>
- <pre>public static int solveP3P(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- int flags)</pre>
- <div class="block">Finds an object pose from 3 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, 3x3 1-channel or
- 1x3/3x1 3-channel. vector<Point3f> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
- vector<Point2f> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.</dd>
- <dd><code>tvecs</code> - Output translation vectors.</dd>
- <dd><code>flags</code> - Method for solving a P3P problem:
- <ul>
- <li>
- REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
- "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
- </li>
- <li>
- REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
- "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
- </li>
- </ul>
- The function estimates the object pose given 3 object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients.
- <b>Note:</b>
- The solutions are sorted by reprojection errors (lowest to highest).</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
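- <div class="block">A minimal Java sketch of calling solveP3P with exactly three 3D-2D correspondences. The helper
- method and its inputs are assumptions: <code>obj</code>/<code>img</code> hold the three matched points and
- <code>cameraMatrix</code> the known intrinsics; an empty distortion vector means zero distortion.</div>
- <code>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfDouble;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.MatOfPoint3f;
- import org.opencv.core.Point;
- import org.opencv.core.Point3;
- class SolveP3PSketch {
-     static List<Mat> candidateRotations(Point3[] obj, Point[] img, Mat cameraMatrix) {
-         MatOfPoint3f objectPoints = new MatOfPoint3f(obj);   // exactly 3 object points
-         MatOfPoint2f imagePoints = new MatOfPoint2f(img);    // exactly 3 image points
-         List<Mat> rvecs = new ArrayList<>();
-         List<Mat> tvecs = new ArrayList<>();
-         int solutions = Calib3d.solveP3P(objectPoints, imagePoints, cameraMatrix,
-                 new MatOfDouble(), rvecs, tvecs, Calib3d.SOLVEPNP_AP3P);
-         // Up to 4 candidate poses, sorted by reprojection error (lowest first).
-         System.out.println(solutions + " solution(s)");
-         return rvecs;
-     }
- }
- </code>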
- <a name="solvePnP-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnP</h4>
- <pre>public static boolean solvePnP(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
- coordinate frame to the camera coordinate frame, using different methods:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.
- With useExtrinsicGuess (see the fuller overloads below), the function uses the provided rvec and tvec values
- as initial approximations of the rotation and translation vectors, respectively, and further optimizes them.
- More information about Perspective-n-Points is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <ul>
- <li>
- With REF: SOLVEPNP_SQPNP input points must be >= 3
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
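- <div class="block">A minimal, self-contained Java sketch of the overload above. The four coplanar object points,
- the image observations, and the pinhole intrinsics (fx = fy = 800, principal point (320, 240)) are all
- hypothetical values chosen only to make the call runnable.</div>
- <code>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Core;
- import org.opencv.core.CvType;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfDouble;
- import org.opencv.core.MatOfPoint2f;
- import org.opencv.core.MatOfPoint3f;
- import org.opencv.core.Point;
- import org.opencv.core.Point3;
- public class SolvePnPSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-         MatOfPoint3f objectPoints = new MatOfPoint3f(        // a planar square of side 2
-                 new Point3(-1, 1, 0), new Point3(1, 1, 0),
-                 new Point3(1, -1, 0), new Point3(-1, -1, 0));
-         MatOfPoint2f imagePoints = new MatOfPoint2f(         // hypothetical observations
-                 new Point(290, 190), new Point(350, 190),
-                 new Point(350, 250), new Point(290, 250));
-         Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
-         cameraMatrix.put(0, 0, 800.0);
-         cameraMatrix.put(1, 1, 800.0);
-         cameraMatrix.put(0, 2, 320.0);
-         cameraMatrix.put(1, 2, 240.0);
-         Mat rvec = new Mat(), tvec = new Mat();
-         boolean ok = Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix,
-                 new MatOfDouble(), rvec, tvec);
-         System.out.println(ok + "\n" + rvec.dump() + "\n" + tvec.dump());
-     }
- }
- </code>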
- <a name="solvePnP-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnP</h4>
- <pre>public static boolean solvePnP(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
- coordinate frame to the camera coordinate frame, using different methods:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.
- More information about Perspective-n-Points is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- <li>
- With REF: SOLVEPNP_SQPNP input points must be >= 3
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
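- <div class="block">A minimal, self-contained Java sketch of calling this overload. All numeric values (points, intrinsics) are illustrative placeholders, not taken from a real calibration:</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
- 
- public class SolvePnPSketch {
-     public static void main(String[] args) {
-         System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // load the OpenCV native library
- 
-         // Four known 3D points in the object coordinate frame (placeholder values).
-         MatOfPoint3f objectPoints = new MatOfPoint3f(
-                 new Point3(0, 0, 0), new Point3(1, 0, 0),
-                 new Point3(1, 1, 0), new Point3(0, 1, 0));
- 
-         // Their corresponding 2D projections in the image (placeholder values).
-         MatOfPoint2f imagePoints = new MatOfPoint2f(
-                 new Point(320, 240), new Point(420, 242),
-                 new Point(418, 340), new Point(322, 338));
- 
-         // Intrinsics: fx, fy on the diagonal, principal point at (cx, cy).
-         Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
-         cameraMatrix.put(0, 0, 800.0);
-         cameraMatrix.put(1, 1, 800.0);
-         cameraMatrix.put(0, 2, 320.0);
-         cameraMatrix.put(1, 2, 240.0);
- 
-         MatOfDouble distCoeffs = new MatOfDouble(); // empty = zero distortion assumed
- 
-         Mat rvec = new Mat();
-         Mat tvec = new Mat();
- 
-         // useExtrinsicGuess = false: rvec and tvec are estimated from scratch.
-         boolean ok = Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
-                 rvec, tvec, false);
-         System.out.println("success = " + ok);
-         System.out.println("rvec = " + rvec.dump());
-         System.out.println("tvec = " + tvec.dump());
-     }
- }
- </pre>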
- </li>
- </ul>
- <a name="solvePnP-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnP</h4>
- <pre>public static boolean solvePnP(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int flags)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
- coordinate frame to the camera coordinate frame, using different methods:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>flags</code> - Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- More information about Perspective-n-Points is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- <li>
- With REF: SOLVEPNP_SQPNP input points must be >= 3
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
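- <div class="block">A hedged sketch of this flags overload for square-marker pose estimation with SOLVEPNP_IPPE_SQUARE. The object points follow exactly the corner order required above; the pixel coordinates are placeholders (in practice they would come from a marker detector), and <code>cameraMatrix</code>/<code>distCoeffs</code> are assumed to be prepared as in the earlier sketch:</div>
- <pre>
- double squareLength = 0.05; // marker side length in metres (assumed)
- 
- // Object points in the exact order required by SOLVEPNP_IPPE_SQUARE.
- MatOfPoint3f objectPoints = new MatOfPoint3f(
-         new Point3(-squareLength / 2,  squareLength / 2, 0),
-         new Point3( squareLength / 2,  squareLength / 2, 0),
-         new Point3( squareLength / 2, -squareLength / 2, 0),
-         new Point3(-squareLength / 2, -squareLength / 2, 0));
- 
- // Detected marker corners in the same order (placeholder pixel values).
- MatOfPoint2f imagePoints = new MatOfPoint2f(
-         new Point(300, 200), new Point(380, 202),
-         new Point(378, 282), new Point(298, 280));
- 
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- boolean ok = Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
-         rvec, tvec, false, Calib3d.SOLVEPNP_IPPE_SQUARE);
- </pre>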
- </li>
- </ul>
- <a name="solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPGeneric</h4>
- <pre>public static int solvePnPGeneric(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- couple), depending on the number of input points and the chosen method:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- Only 1 solution is returned.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvecs</code> - Vector of output translation vectors.
- More information is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
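- <div class="block">A short sketch of consuming the multi-solution output of this overload. <code>objectPoints</code>, <code>imagePoints</code>, <code>cameraMatrix</code> and <code>distCoeffs</code> are assumed to be prepared as in the solvePnP sketch above (MatOfPoint3f/MatOfPoint2f are Mat subclasses, so they can be passed directly):</div>
- <pre>
- java.util.List<Mat> rvecs = new java.util.ArrayList<>();
- java.util.List<Mat> tvecs = new java.util.ArrayList<>();
- 
- int solutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
-         cameraMatrix, distCoeffs, rvecs, tvecs);
- 
- // Each index i is one candidate <rotation vector, translation vector> couple.
- for (int i = 0; i < solutions; i++) {
-     System.out.println("solution " + i
-             + ": rvec = " + rvecs.get(i).dump()
-             + ", tvec = " + tvecs.get(i).dump());
- }
- </pre>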
- </li>
- </ul>
- <a name="solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPGeneric</h4>
- <pre>public static int solvePnPGeneric(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- couple), depending on the number of input points and the chosen method:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- Only 1 solution is returned.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvecs</code> - Vector of output translation vectors.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.
- More information is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPGeneric</h4>
- <pre>public static int solvePnPGeneric(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- couple), depending on the number of input points and the chosen method:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- Only 1 solution is returned.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvecs</code> - Vector of output translation vectors.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>flags</code> - Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
- More information is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
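- <div class="block">For a coplanar object, this overload can be asked explicitly for the two SOLVEPNP_IPPE candidate poses; a sketch under the same assumptions as above (coplanar objectPoints with at least 4 points):</div>
- <pre>
- java.util.List<Mat> rvecs = new java.util.ArrayList<>();
- java.util.List<Mat> tvecs = new java.util.ArrayList<>();
- 
- // useExtrinsicGuess only applies to SOLVEPNP_ITERATIVE, so false is passed here.
- int solutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
-         cameraMatrix, distCoeffs, rvecs, tvecs,
-         false, Calib3d.SOLVEPNP_IPPE);
- 
- // Two candidate poses are expected for the planar case; they can be ranked by
- // reprojection error (see the overload below that also returns it).
- </pre>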
- </li>
- </ul>
- <a name="solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPGeneric</h4>
- <pre>public static int solvePnPGeneric(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- couple), depending on the number of input points and the chosen method:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- Only 1 solution is returned.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvecs</code> - Vector of output translation vectors.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>flags</code> - Method for solving a PnP problem: see REF: calib3d_solvePnP_flags</dd>
- <dd><code>rvec</code> - Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- and useExtrinsicGuess is set to true.
- More information is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPGeneric</h4>
- <pre>public static int solvePnPGeneric(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- couple), depending on the number of input points and the chosen method:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- Only 1 solution is returned.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvecs</code> - Vector of output translation vectors.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>flags</code> - Method for solving a PnP problem: see REF: calib3d_solvePnP_flags</dd>
- <dd><code>rvec</code> - Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- and useExtrinsicGuess is set to true.</dd>
- <dd><code>tvec</code> - Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- and useExtrinsicGuess is set to true.
- More information is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
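- <div class="block">A sketch of seeding the iterative method with an initial pose, e.g. the pose estimated in a previous video frame. <code>rvecInit</code> and <code>tvecInit</code> are hypothetical names for that prior estimate; the other inputs are assumed as in the earlier sketches:</div>
- <pre>
- java.util.List<Mat> rvecs = new java.util.ArrayList<>();
- java.util.List<Mat> tvecs = new java.util.ArrayList<>();
- 
- // The initial guess is only used because useExtrinsicGuess = true and the
- // flag is SOLVEPNP_ITERATIVE.
- int solutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
-         cameraMatrix, distCoeffs, rvecs, tvecs,
-         true, Calib3d.SOLVEPNP_ITERATIVE, rvecInit, tvecInit);
- </pre>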
- </li>
- </ul>
- <a name="solvePnPGeneric-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-boolean-int-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPGeneric</h4>
- <pre>public static int solvePnPGeneric(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- boolean useExtrinsicGuess,
- int flags,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> reprojectionError)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences.
- SEE: REF: calib3d_solvePnP
- This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
- couple), depending on the number of input points and the chosen method:
- <ul>
- <li>
- P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- </li>
- <li>
- REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
- Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- <li>
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
- Only 1 solution is returned.
- </li>
- </ul></div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvecs</code> - Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvecs</code> - Vector of output translation vectors.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>flags</code> - Method for solving a PnP problem: see REF: calib3d_solvePnP_flags</dd>
- <dd><code>rvec</code> - Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- and useExtrinsicGuess is set to true.</dd>
- <dd><code>tvec</code> - Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
- and useExtrinsicGuess is set to true.</dd>
- <dd><code>reprojectionError</code> - Optional vector of reprojection error, that is the RMS error
- (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
- and the 3D object points projected with the estimated pose.
- More information is described in REF: calib3d_solvePnP
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- </li>
- <li>
- If you are using Python:
- <ul>
- <li>
- Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- </li>
- <li>
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- </li>
- <li>
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- </li>
- </ul>
- <li>
- The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, REF: SOLVEPNP_EPNP method will be used instead.
- </li>
- <li>
- The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- </li>
- <li>
- With REF: SOLVEPNP_ITERATIVE method and <code>useExtrinsicGuess=true</code>, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- </li>
- <li>
- With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- </li>
- <li>
- REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- <ul>
- <li>
- point 0: [-squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 1: [ squareLength / 2, squareLength / 2, 0]
- </li>
- <li>
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- </li>
- <li>
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- </li>
- </ul>
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
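- <div class="block">When the per-solution RMS reprojection error is also requested, it can be used to rank the returned candidates. A sketch under the same assumptions as the previous examples; empty Mats are passed for the unused initial guess since useExtrinsicGuess is false:</div>
- <pre>
- java.util.List<Mat> rvecs = new java.util.ArrayList<>();
- java.util.List<Mat> tvecs = new java.util.ArrayList<>();
- Mat reprojectionError = new Mat();
- 
- int solutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
-         cameraMatrix, distCoeffs, rvecs, tvecs,
-         false, Calib3d.SOLVEPNP_IPPE, new Mat(), new Mat(), reprojectionError);
- 
- // reprojectionError holds one RMSE value per returned solution; the candidate
- // with the smallest error is usually the one to keep.
- System.out.println("errors = " + reprojectionError.dump());
- </pre>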
- </li>
- </ul>
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePNPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
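- <div class="block">A minimal sketch of the RANSAC variant, useful when some of the 2D-3D correspondences may be outliers (e.g. feature mismatches). <code>objectPoints</code> (MatOfPoint3f) and <code>imagePoints</code> (MatOfPoint2f) are assumed to be prepared as in the solvePnP sketch above:</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- 
- // RANSAC discards correspondences whose reprojection error exceeds the
- // default inlier threshold before estimating the final pose.
- boolean ok = Calib3d.solvePnPRansac(objectPoints, imagePoints,
-         cameraMatrix, distCoeffs, rvec, tvec);
- System.out.println("rvec = " + rvec.dump() + ", tvec = " + tvec.dump());
- </pre>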
- </li>
- </ul>
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePNPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
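- <div class="block"><b>Usage sketch (illustrative only):</b> a minimal, self-contained example of this overload.
- The point coordinates and intrinsics are made-up values, and a standard OpenCV Java setup (native library on
- java.library.path) is assumed.</div>
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.*;
- 
- public class PnPRansacSketch {
-     // Assumes the OpenCV JNI library is available on java.library.path.
-     static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }
- 
-     public static void main(String[] args) {
-         // Hypothetical object: six 3D model points in the object coordinate frame.
-         MatOfPoint3f objectPoints = new MatOfPoint3f(
-                 new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(1, 1, 0),
-                 new Point3(0, 1, 0), new Point3(0.5, 0.5, 0.5), new Point3(1, 0.5, 0.5));
-         // Their detected 2D projections (illustrative values, e.g. from feature matching).
-         MatOfPoint2f imagePoints = new MatOfPoint2f(
-                 new Point(320, 240), new Point(420, 245), new Point(425, 340),
-                 new Point(325, 335), new Point(372, 290), new Point(422, 292));
-         // Pinhole intrinsics: fx = fy = 800, principal point at (320, 240).
-         Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
-         cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);
-         cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);
-         MatOfDouble distCoeffs = new MatOfDouble(); // empty: zero distortion assumed
-         Mat rvec = new Mat();
-         Mat tvec = new Mat();
-         // useExtrinsicGuess = false: rvec and tvec are treated purely as outputs here.
-         boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix,
-                 distCoeffs, rvec, tvec, false);
-         System.out.println("pose found: " + found + ", tvec = " + tvec.dump());
-     }
- }
- </pre>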
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>iterationsCount</code> - Number of RANSAC iterations.
- In this overload the inlier threshold (the maximum allowed distance between the observed and computed
- point projections for a point to count as an inlier) keeps its default value.
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
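- <div class="block"><b>Usage sketch (illustrative only):</b> the same call with an explicit RANSAC iteration budget;
- 500 is an arbitrary choice, and the remaining inputs are assumed to be set up as in the sketch above.</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- // Run at most 500 RANSAC iterations instead of the default budget.
- boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix,
-         distCoeffs, rvec, tvec, false, 500);
- </pre>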
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>iterationsCount</code> - Number of iterations.</dd>
- <dd><code>reprojectionError</code> - Inlier threshold value used by the RANSAC procedure. The parameter value
- is the maximum allowed distance between the observed and computed point projections to consider it
- an inlier.
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError,
- double confidence)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>iterationsCount</code> - Number of iterations.</dd>
- <dd><code>reprojectionError</code> - Inlier threshold value used by the RANSAC procedure. The parameter value
- is the maximum allowed distance between the observed and computed point projections to consider it
- an inlier.</dd>
- <dd><code>confidence</code> - The probability that the algorithm produces a useful result.
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
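- <div class="block"><b>Usage sketch (illustrative only):</b> this overload additionally exposes the inlier
- reprojection-error threshold and the RANSAC confidence; 2.0f and 0.99 are arbitrary choices, with the remaining
- inputs set up as in the first sketch.</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- // Accept a correspondence as an inlier if its reprojection error is below 2.0 pixels,
- // and request a 0.99 probability that RANSAC draws at least one outlier-free sample.
- boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix,
-         distCoeffs, rvec, tvec, false, 500, 2.0f, 0.99);
- </pre>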
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-double-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError,
- double confidence,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>iterationsCount</code> - Number of iterations.</dd>
- <dd><code>reprojectionError</code> - Inlier threshold value used by the RANSAC procedure. The parameter value
- is the maximum allowed distance between the observed and computed point projections to consider it
- an inlier.</dd>
- <dd><code>confidence</code> - The probability that the algorithm produces a useful result.</dd>
- <dd><code>inliers</code> - Output vector that contains indices of inliers in objectPoints and imagePoints .
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
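- <div class="block"><b>Usage sketch (illustrative only):</b> reading back the inlier indices reported by RANSAC;
- inputs are assumed to be set up as in the first sketch.</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- Mat inliers = new Mat();
- boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix,
-         distCoeffs, rvec, tvec, false, 500, 2.0f, 0.99, inliers);
- if (found) {
-     // Each row of 'inliers' is the index of one correspondence kept by RANSAC.
-     for (int i = 0; i < inliers.rows(); i++) {
-         int idx = (int) inliers.get(i, 0)[0];
-         System.out.println("inlier correspondence #" + idx);
-     }
- }
- </pre>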
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-boolean-int-float-double-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- boolean useExtrinsicGuess,
- int iterationsCount,
- float reprojectionError,
- double confidence,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- int flags)</pre>
- <div class="block">Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or
- 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can be also passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can be also passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system.</dd>
- <dd><code>tvec</code> - Output translation vector.</dd>
- <dd><code>useExtrinsicGuess</code> - Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
- the provided rvec and tvec values as initial approximations of the rotation and translation
- vectors, respectively, and further optimizes them.</dd>
- <dd><code>iterationsCount</code> - Number of iterations.</dd>
- <dd><code>reprojectionError</code> - Inlier threshold value used by the RANSAC procedure. The parameter value
- is the maximum allowed distance between the observed and computed point projections to consider it
- an inlier.</dd>
- <dd><code>confidence</code> - The probability that the algorithm produces a useful result.</dd>
- <dd><code>inliers</code> - Output vector that contains indices of inliers in objectPoints and imagePoints .</dd>
- <dd><code>flags</code> - Method for solving a PnP problem (see REF: solvePnP ).
- The function estimates an object pose given a set of object points, their corresponding image
- projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
- a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
- projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
- makes the function resistant to outliers.
- <b>Note:</b>
- <ul>
- <li>
- An example of how to use solvePnPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- </li>
- <li>
- The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- <ul>
- <li>
- if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- </li>
- <li>
- if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- </li>
- </ul>
- <li>
- The method used to estimate the camera pose using all the inliers is defined by the
- flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
- </li>
- </ul></dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
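- <div class="block"><b>Usage sketch (illustrative only):</b> the fully parameterized overload, with SOLVEPNP_EPNP
- chosen purely for illustration; inputs as in the first sketch.</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- Mat inliers = new Mat();
- // With SOLVEPNP_EPNP, EPnP is used both for the minimal sample sets and for the
- // final estimate on all inliers (see the note above).
- boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix,
-         distCoeffs, rvec, tvec, false, 500, 2.0f, 0.99, inliers,
-         Calib3d.SOLVEPNP_EPNP);
- </pre>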
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers)</pre>
- </li>
- </ul>
- <a name="solvePnPRansac-org.opencv.core.MatOfPoint3f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.MatOfDouble-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.calib3d.UsacParams-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRansac</h4>
- <pre>public static boolean solvePnPRansac(<a href="../../../org/opencv/core/MatOfPoint3f.html" title="class in org.opencv.core">MatOfPoint3f</a> objectPoints,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/MatOfDouble.html" title="class in org.opencv.core">MatOfDouble</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> inliers,
- <a href="../../../org/opencv/calib3d/UsacParams.html" title="class in org.opencv.calib3d">UsacParams</a> params)</pre>
- </li>
- </ul>
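- <div class="block"><b>Usage sketch (illustrative only):</b> the USAC-based overload. This sketch assumes UsacParams
- can be default-constructed in the Java bindings, in which case its fields keep their library defaults; inputs as
- in the first sketch.</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- Mat inliers = new Mat();
- // Default-constructed parameters: sampler, scoring, local optimization, threshold,
- // and so on are left at their library defaults.
- UsacParams params = new UsacParams();
- boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix,
-         distCoeffs, rvec, tvec, inliers, params);
- </pre>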
- <a name="solvePnPRefineLM-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRefineLM</h4>
- <pre>public static void solvePnPRefineLM(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</pre>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- where N is the number of points. vector<Point3d> can also be passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can also be passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system. Input values are used as an initial solution.</dd>
- <dd><code>tvec</code> - Input/Output translation vector. Input values are used as an initial solution.
- The function refines the object pose given at least 3 object points, their corresponding image
- projections, an initial solution for the rotation and translation vector,
- as well as the camera intrinsic matrix and the distortion coefficients.
- The function minimizes the projection error with respect to the rotation and the translation vectors, according
- to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.</dd>
- </dl>
- </li>
- </ul>
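- <div class="block"><b>Usage sketch (illustrative only):</b> refining an initial pose with the Levenberg-Marquardt
- scheme. The initial solution comes from solvePnP here; the point data and intrinsics are assumed to be set up as
- in the earlier sketches (MatOfPoint3f/MatOfPoint2f are accepted because they extend Mat).</div>
- <pre>
- Mat rvec = new Mat();
- Mat tvec = new Mat();
- // Coarse initial solution from any PnP method.
- Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
- // Refine rvec and tvec in place by Levenberg-Marquardt minimization of the reprojection error.
- Calib3d.solvePnPRefineLM(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
- </pre>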
- <a name="solvePnPRefineLM-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRefineLM</h4>
- <pre>public static void solvePnPRefineLM(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- where N is the number of points. vector<Point3d> can also be passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can also be passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system. Input values are used as an initial solution.</dd>
- <dd><code>tvec</code> - Input/Output translation vector. Input values are used as an initial solution.</dd>
- <dd><code>criteria</code> - Criteria when to stop the Levenberg-Marquardt iterative algorithm.
- The function refines the object pose given at least 3 object points, their corresponding image
- projections, an initial solution for the rotation and translation vector,
- as well as the camera intrinsic matrix and the distortion coefficients.
- The function minimizes the projection error with respect to the rotation and the translation vectors, according
- to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.</dd>
- </dl>
- </li>
- </ul>
- <a name="solvePnPRefineVVS-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRefineVVS</h4>
- <pre>public static void solvePnPRefineVVS(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec)</pre>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- where N is the number of points. vector<Point3d> can also be passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can also be passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system. Input values are used as an initial solution.</dd>
- <dd><code>tvec</code> - Input/Output translation vector. Input values are used as an initial solution.
- In this overload the stopping criteria and the virtual visual servoing gain (the \(\alpha\) gain in the
- damped Gauss-Newton formulation) keep their default values.
- The function refines the object pose given at least 3 object points, their corresponding image
- projections, an initial solution for the rotation and translation vector,
- as well as the camera intrinsic matrix and the distortion coefficients.
- The function minimizes the projection error with respect to the rotation and the translation vectors, using a
- virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.</dd>
- </dl>
- </li>
- </ul>
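- <div class="block"><b>Usage sketch (illustrative only):</b> virtual visual servoing refinement with default
- stopping criteria and gain; rvec/tvec are assumed to already hold an initial pose, with the remaining inputs as
- in the earlier sketches.</div>
- <pre>
- // rvec and tvec already contain an initial pose (e.g. from solvePnP or solvePnPRansac);
- // they are refined in place.
- Calib3d.solvePnPRefineVVS(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
- </pre>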
- <a name="solvePnPRefineVVS-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRefineVVS</h4>
- <pre>public static void solvePnPRefineVVS(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- where N is the number of points. vector<Point3d> can also be passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can also be passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system. Input values are used as an initial solution.</dd>
- <dd><code>tvec</code> - Input/Output translation vector. Input values are used as an initial solution.</dd>
- <dd><code>criteria</code> - Criteria when to stop the iterative minimization algorithm.
- In this overload the virtual visual servoing gain (the \(\alpha\) gain in the
- damped Gauss-Newton formulation) keeps its default value.
- The function refines the object pose given at least 3 object points, their corresponding image
- projections, an initial solution for the rotation and translation vector,
- as well as the camera intrinsic matrix and the distortion coefficients.
- The function minimizes the projection error with respect to the rotation and the translation vectors, using a
- virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.</dd>
- </dl>
- </li>
- </ul>
- <a name="solvePnPRefineVVS-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>solvePnPRefineVVS</h4>
- <pre>public static void solvePnPRefineVVS(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> objectPoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> imagePoints,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> rvec,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> tvec,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria,
- double VVSlambda)</pre>
- <div class="block">Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
- to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
- SEE: REF: calib3d_solvePnP</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
- where N is the number of points. vector<Point3d> can also be passed here.</dd>
- <dd><code>imagePoints</code> - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
- where N is the number of points. vector<Point2d> can also be passed here.</dd>
- <dd><code>cameraMatrix</code> - Input camera intrinsic matrix \(\cameramatrix{A}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
- assumed.</dd>
- <dd><code>rvec</code> - Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
- the model coordinate system to the camera coordinate system. Input values are used as an initial solution.</dd>
- <dd><code>tvec</code> - Input/Output translation vector. Input values are used as an initial solution.</dd>
- <dd><code>criteria</code> - Criteria when to stop the iterative minimization algorithm.</dd>
- <dd><code>VVSlambda</code> - Gain for the virtual visual servoing control law, equivalent to the \(\alpha\)
- gain in the Damped Gauss-Newton formulation.
- The function refines the object pose given at least 3 object points, their corresponding image
- projections, an initial solution for the rotation and translation vector,
- as well as the camera intrinsic matrix and the distortion coefficients.
- The function minimizes the projection error with respect to the rotation and the translation vectors, using a
- virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.</dd>
- </dl>
- </li>
- </ul>
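- <div class="block"><b>Usage sketch (illustrative only):</b> the fully parameterized VVS overload; the iteration
- count, epsilon, and gain below are arbitrary choices, and rvec/tvec are assumed to hold an initial pose.</div>
- <pre>
- // Stop after 20 iterations or once the update becomes smaller than 1e-8.
- TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 20, 1e-8);
- // Control-law gain alpha: values below 1 damp the Gauss-Newton update more strongly.
- double vvsLambda = 1.0;
- Calib3d.solvePnPRefineVVS(objectPoints, imagePoints, cameraMatrix, distCoeffs,
-         rvec, tvec, criteria, vvsLambda);
- </pre>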
- <a name="stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrate</h4>
- <pre>public static double stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F)</pre>
- </li>
- </ul>
- <a name="stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrate</h4>
- <pre>public static double stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- int flags)</pre>
- </li>
- </ul>
- <a name="stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrate</h4>
- <pre>public static double stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- </li>
- </ul>
- <a name="stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrate</h4>
- <pre>public static double stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</pre>
- </li>
- </ul>
- <a name="stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrate</h4>
- <pre>public static double stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</pre>
- </li>
- </ul>
- <a name="stereoCalibrate-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrate</h4>
- <pre>public static double stereoCalibrate(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- </li>
- </ul>
- <a name="stereoCalibrateExtended-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrateExtended</h4>
- <pre>public static double stereoCalibrateExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors)</pre>
- <div class="block">Calibrates a stereo camera set up. This function finds the intrinsic parameters
- for each of the two cameras and the extrinsic parameters between the two cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points. The same structure as
- in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
- points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
- equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
- be equal for each i.</dd>
- <dd><code>imagePoints1</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera. The same structure as in REF: calibrateCamera.</dd>
- <dd><code>imagePoints2</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera. The same structure as in REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera intrinsic matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera intrinsic matrix for the second camera. See the description of
- cameraMatrix1.</dd>
- <dd><code>distCoeffs2</code> - Input/output lens distortion coefficients for the second camera. See
- description for distCoeffs1.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the camera intrinsic matrices.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector T, this matrix brings
- points given in the first camera's coordinate system to points in the second camera's
- coordinate system. In more technical terms, the tuple of R and T performs a change of basis
- from the first camera's coordinate system to the second camera's coordinate system. Due to its
- duality, this tuple is equivalent to the position of the first camera with respect to the
- second camera coordinate system.</dd>
- <dd><code>T</code> - Output translation vector, see description above.</dd>
- <dd><code>E</code> - Output essential matrix.</dd>
- <dd><code>F</code> - Output fundamental matrix.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.
- <ul>
- <li>
- REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
- matrices are estimated.
- </li>
- <li>
- REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
- according to the specified flags. Initial values are provided by the user.
- </li>
- <li>
- REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
- Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
- </li>
- <li>
- REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
- </li>
- <li>
- REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
- </li>
- <li>
- REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
- .
- </li>
- <li>
- REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
- </li>
- <li>
- REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
- zero and keep them fixed.
- </li>
- <li>
- REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
- distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
- the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. For backward
- compatibility, this extra flag should be explicitly specified to make the calibration
- function use the rational model and return 8 coefficients. If the flag is not set, the
- function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3, and s4 are enabled. For
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the thin prism model and return 12 coefficients. If the flag is not
- set, the function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
- set, the function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- </ul>
- The function estimates the transformation between two cameras making a stereo pair. If one computes
- the poses of an object relative to the first camera and to the second camera,
- ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
- relative position and orientation between the two cameras are fixed, then those poses definitely
- relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
- two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
- given. This is what the described function does. It computes (\(R\),\(T\)) such that:
- \(R_2=R R_1\)
- \(T_2=R T_1 + T.\)
- Therefore, one can compute the coordinate representation of a 3D point for the second camera's
- coordinate system when given the point's coordinate representation in the first camera's coordinate
- system:
- \(\begin{bmatrix}
- X_2 \\
- Y_2 \\
- Z_2 \\
- 1
- \end{bmatrix} = \begin{bmatrix}
- R & T \\
- 0 & 1
- \end{bmatrix} \begin{bmatrix}
- X_1 \\
- Y_1 \\
- Z_1 \\
- 1
- \end{bmatrix}.\)
- Optionally, it computes the essential matrix E:
- \(E= \begin{bmatrix} 0 & -T_2 & T_1 \\ T_2 & 0 & -T_0 \\ -T_1 & T_0 & 0 \end{bmatrix} R\)
- where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
- And the function can also compute the fundamental matrix F:
- \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
- Besides the stereo-related information, the function can also perform a full calibration of each of
- the two cameras. However, due to the high dimensionality of the parameter space and noise in the
- input data, the function can diverge from the correct solution. If the intrinsic parameters can be
- estimated with high accuracy for each of the cameras individually (for example, using
- #calibrateCamera ), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to the
- function along with the computed intrinsic parameters. Otherwise, if all the parameters are
- estimated at once, it makes sense to restrict some parameters, for example, pass
- REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
- reasonable assumption.
- Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
- points in all the available views from both cameras. The function returns the final value of the
- re-projection error.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
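- stereoCalibrateExtended expects objectPoints, imagePoints1 and imagePoints2 as per-view lists of point
- matrices (see the objectPoints description in the overloads below). As a rough, non-normative sketch,
- the object points for a planar chessboard pattern might be prepared as follows; the 9x6 inner-corner
- grid, the 25 mm square size and the number of views are placeholder assumptions, not values implied by
- this API:
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.core.Mat;
- import org.opencv.core.MatOfPoint3f;
- import org.opencv.core.Point3;
- // One MatOfPoint3f per pattern view; a rigid board uses the same 3D model in every view.
- int cols = 9, rows = 6;            // assumed inner-corner grid of the chessboard
- float squareSize = 25.0f;          // assumed square size, in millimetres
- List<Point3> corners = new ArrayList<>();
- for (int r = 0; r < rows; r++)
-     for (int c = 0; c < cols; c++)
-         corners.add(new Point3(c * squareSize, r * squareSize, 0));
- MatOfPoint3f board = new MatOfPoint3f();
- board.fromList(corners);
- int numViews = 15;                 // assumed number of captured stereo views
- List<Mat> objectPoints = new ArrayList<>();
- for (int i = 0; i < numViews; i++)
-     objectPoints.add(board);
- // imagePoints1 / imagePoints2 hold one MatOfPoint2f of detected corners per view
- // (e.g. from findChessboardCorners), in the same view order for both cameras.
- </pre>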
- <a name="stereoCalibrateExtended-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrateExtended</h4>
- <pre>public static double stereoCalibrateExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags)</pre>
- <div class="block">Calibrates a stereo camera set up. This function finds the intrinsic parameters
- for each of the two cameras and the extrinsic parameters between the two cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points. The same structure as
- in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
- points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
- equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
- be equal for each i.</dd>
- <dd><code>imagePoints1</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera. The same structure as in REF: calibrateCamera.</dd>
- <dd><code>imagePoints2</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera. The same structure as in REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera intrinsic matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera intrinsic matrix for the second camera. See the description of
- cameraMatrix1.</dd>
- <dd><code>distCoeffs2</code> - Input/output lens distortion coefficients for the second camera. See
- description for distCoeffs1.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the camera intrinsic matrices.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector T, this matrix brings
- points given in the first camera's coordinate system to points in the second camera's
- coordinate system. In more technical terms, the tuple of R and T performs a change of basis
- from the first camera's coordinate system to the second camera's coordinate system. Due to its
- duality, this tuple is equivalent to the position of the first camera with respect to the
- second camera coordinate system.</dd>
- <dd><code>T</code> - Output translation vector, see description above.</dd>
- <dd><code>E</code> - Output essential matrix.</dd>
- <dd><code>F</code> - Output fundamental matrix.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
- matrices are estimated.
- </li>
- <li>
- REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
- according to the specified flags. Initial values are provided by the user.
- </li>
- <li>
- REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
- Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
- </li>
- <li>
- REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
- </li>
- <li>
- REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
- </li>
- <li>
- REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\) .
- </li>
- <li>
- REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
- </li>
- <li>
- REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
- zeros and fix them there.
- </li>
- <li>
- REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
- distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
- the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
- compatibility, this extra flag should be explicitly specified to make the calibration
- function use the rational model and return 8 coefficients. If the flag is not set, the
- function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the thin prism model and return 12 coefficients. If the flag is not
- set, the function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
- set, the function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- </ul>
- The function estimates the transformation between two cameras making a stereo pair. If one computes
- the poses of an object relative to the first camera and to the second camera,
- ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
- relative position and orientation between the two cameras are fixed, then those poses definitely
- relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
- two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
- given. This is what the described function does. It computes (\(R\),\(T\)) such that:
- \(R_2=R R_1\)
- \(T_2=R T_1 + T.\)
- Therefore, one can compute the coordinate representation of a 3D point for the second camera's
- coordinate system when given the point's coordinate representation in the first camera's coordinate
- system:
- \(\begin{bmatrix}
- X_2 \\
- Y_2 \\
- Z_2 \\
- 1
- \end{bmatrix} = \begin{bmatrix}
- R & T \\
- 0 & 1
- \end{bmatrix} \begin{bmatrix}
- X_1 \\
- Y_1 \\
- Z_1 \\
- 1
- \end{bmatrix}.\)
- Optionally, it computes the essential matrix E:
- \(E= \begin{bmatrix} 0 & -T_2 & T_1 \\ T_2 & 0 & -T_0 \\ -T_1 & T_0 & 0 \end{bmatrix} R\)
- where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
- And the function can also compute the fundamental matrix F:
- \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
- Besides the stereo-related information, the function can also perform a full calibration of each of
- the two cameras. However, due to the high dimensionality of the parameter space and noise in the
- input data, the function can diverge from the correct solution. If the intrinsic parameters can be
- estimated with high accuracy for each of the cameras individually (for example, using
- #calibrateCamera ), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to the
- function along with the computed intrinsic parameters. Otherwise, if all the parameters are
- estimated at once, it makes sense to restrict some parameters, for example, pass
- REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
- reasonable assumption.
- Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
- points in all the available views from both cameras. The function returns the final value of the
- re-projection error.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
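- A minimal, hedged usage sketch of the overload documented above follows. It assumes objectPoints,
- imagePoints1 and imagePoints2 are prepared as in the earlier sketch and that cameraMatrix1/distCoeffs1
- and cameraMatrix2/distCoeffs2 were already estimated per camera with calibrateCamera; the image size is
- an assumed value. The intrinsics are then fixed so that only the stereo extrinsics are refined, as the
- description above recommends:
- <pre>
- import java.util.ArrayList;
- import java.util.List;
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
- import org.opencv.core.Size;
- Size imageSize = new Size(1280, 720);   // assumed capture resolution
- Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat();
- Mat perViewErrors = new Mat();
- List<Mat> rvecs = new ArrayList<>();
- List<Mat> tvecs = new ArrayList<>();
- // Fix the per-camera intrinsics so that only R, T, E and F are estimated.
- double rms = Calib3d.stereoCalibrateExtended(
-         objectPoints, imagePoints1, imagePoints2,
-         cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-         imageSize, R, T, E, F, rvecs, tvecs, perViewErrors,
-         Calib3d.CALIB_FIX_INTRINSIC);
- System.out.println("Stereo RMS re-projection error: " + rms);
- // perViewErrors holds one RMS value per pattern view; views with a clearly higher error
- // than the overall RMS are good candidates for removal before re-running the calibration.
- </pre>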
- <a name="stereoCalibrateExtended-java.util.List-java.util.List-java.util.List-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-java.util.List-java.util.List-org.opencv.core.Mat-int-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoCalibrateExtended</h4>
- <pre>public static double stereoCalibrateExtended(java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> objectPoints,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints1,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> imagePoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> E,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> rvecs,
- java.util.List<<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a>> tvecs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> perViewErrors,
- int flags,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block">Calibrates a stereo camera set up. This function finds the intrinsic parameters
- for each of the two cameras and the extrinsic parameters between the two cameras.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>objectPoints</code> - Vector of vectors of the calibration pattern points. The same structure as
- in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
- points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
- equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
- be equal for each i.</dd>
- <dd><code>imagePoints1</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera. The same structure as in REF: calibrateCamera.</dd>
- <dd><code>imagePoints2</code> - Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera. The same structure as in REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix1</code> - Input/output camera intrinsic matrix for the first camera, the same as in
- REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.</dd>
- <dd><code>distCoeffs1</code> - Input/output vector of distortion coefficients, the same as in
- REF: calibrateCamera.</dd>
- <dd><code>cameraMatrix2</code> - Input/output camera intrinsic matrix for the second camera. See the description of
- cameraMatrix1.</dd>
- <dd><code>distCoeffs2</code> - Input/output lens distortion coefficients for the second camera. See
- description for distCoeffs1.</dd>
- <dd><code>imageSize</code> - Size of the image used only to initialize the camera intrinsic matrices.</dd>
- <dd><code>R</code> - Output rotation matrix. Together with the translation vector T, this matrix brings
- points given in the first camera's coordinate system to points in the second camera's
- coordinate system. In more technical terms, the tuple of R and T performs a change of basis
- from the first camera's coordinate system to the second camera's coordinate system. Due to its
- duality, this tuple is equivalent to the position of the first camera with respect to the
- second camera coordinate system.</dd>
- <dd><code>T</code> - Output translation vector, see description above.</dd>
- <dd><code>E</code> - Output essential matrix.</dd>
- <dd><code>F</code> - Output fundamental matrix.</dd>
- <dd><code>rvecs</code> - Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.</dd>
- <dd><code>tvecs</code> - Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).</dd>
- <dd><code>perViewErrors</code> - Output vector of the RMS re-projection error estimated for each pattern view.</dd>
- <dd><code>flags</code> - Different flags that may be zero or a combination of the following values:
- <ul>
- <li>
- REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
- matrices are estimated.
- </li>
- <li>
- REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
- according to the specified flags. Initial values are provided by the user.
- </li>
- <li>
- REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
- Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
- </li>
- <li>
- REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
- </li>
- <li>
- REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
- </li>
- <li>
- REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\) .
- </li>
- <li>
- REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
- </li>
- <li>
- REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
- zeros and fix them there.
- </li>
- <li>
- REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
- distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
- the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
- compatibility, this extra flag should be explicitly specified to make the calibration
- function use the rational model and return 8 coefficients. If the flag is not set, the
- function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the thin prism model and return 12 coefficients. If the flag is not
- set, the function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- <li>
- REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
- backward compatibility, this extra flag should be explicitly specified to make the
- calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
- set, the function computes and returns only 5 distortion coefficients.
- </li>
- <li>
- REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
- the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
- supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- </li>
- </ul></dd>
- <dd><code>criteria</code> - Termination criteria for the iterative optimization algorithm.
- The function estimates the transformation between two cameras making a stereo pair. If one computes
- the poses of an object relative to the first camera and to the second camera,
- ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
- relative position and orientation between the two cameras are fixed, then those poses definitely
- relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
- two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
- given. This is what the described function does. It computes (\(R\),\(T\)) such that:
- \(R_2=R R_1\)
- \(T_2=R T_1 + T.\)
- Therefore, one can compute the coordinate representation of a 3D point for the second camera's
- coordinate system when given the point's coordinate representation in the first camera's coordinate
- system:
- \(\begin{bmatrix}
- X_2 \\
- Y_2 \\
- Z_2 \\
- 1
- \end{bmatrix} = \begin{bmatrix}
- R & T \\
- 0 & 1
- \end{bmatrix} \begin{bmatrix}
- X_1 \\
- Y_1 \\
- Z_1 \\
- 1
- \end{bmatrix}.\)
- Optionally, it computes the essential matrix E:
- \(E= \begin{bmatrix} 0 & -T_2 & T_1 \\ T_2 & 0 & -T_0 \\ -T_1 & T_0 & 0 \end{bmatrix} R\)
- where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
- And the function can also compute the fundamental matrix F:
- \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
- Besides the stereo-related information, the function can also perform a full calibration of each of
- the two cameras. However, due to the high dimensionality of the parameter space and noise in the
- input data, the function can diverge from the correct solution. If the intrinsic parameters can be
- estimated with high accuracy for each of the cameras individually (for example, using
- #calibrateCamera ), it is recommended to do so and then pass the REF: CALIB_FIX_INTRINSIC flag to the
- function along with the computed intrinsic parameters. Otherwise, if all the parameters are
- estimated at once, it makes sense to restrict some parameters, for example, pass
- REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
- reasonable assumption.
- Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
- points in all the available views from both cameras. The function returns the final value of the
- re-projection error.</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
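- The overload above additionally accepts a TermCriteria. Continuing the sketch from the previous entries
- (all variable names carried over), an illustrative stopping rule of at most 100 iterations or a 1e-6
- change between iterations might be passed together with the flag combination recommended above when the
- intrinsics are estimated jointly; the concrete values are assumptions, not defaults of this method:
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.TermCriteria;
- // Illustrative stopping rule: stop after 100 iterations or when the change drops below 1e-6.
- TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6);
- double rms = Calib3d.stereoCalibrateExtended(
-         objectPoints, imagePoints1, imagePoints2,
-         cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-         imageSize, R, T, E, F, rvecs, tvecs, perViewErrors,
-         Calib3d.CALIB_SAME_FOCAL_LENGTH | Calib3d.CALIB_ZERO_TANGENT_DIST,
-         criteria);
- </pre>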
- <a name="stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectify</h4>
- <pre>public static void stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q)</pre>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix1</code> - First camera intrinsic matrix.</dd>
- <dd><code>distCoeffs1</code> - First camera distortion parameters.</dd>
- <dd><code>cameraMatrix2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>distCoeffs2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>T</code> - Translation vector from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- brings points given in the unrectified first camera's coordinate system to points in the rectified
- first camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified first camera's coordinate system to the rectified first camera's coordinate system.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- brings points given in the unrectified second camera's coordinate system to points in the rectified
- second camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified second camera's coordinate system to the rectified second camera's coordinate system.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified first camera's image.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified second camera's image.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
- In this overload the optional arguments take their default values. The flags argument (zero or
- REF: CALIB_ZERO_DISPARITY) controls whether the function makes the principal points of each camera
- have the same pixel coordinates in the rectified views; if the flag is not set, the function may
- still shift the images horizontally or vertically (depending on the orientation of epipolar lines)
- to maximize the useful image area. The free scaling parameter alpha performs the default scaling
- when it is -1 or absent; otherwise it should be between 0 and 1: alpha=0 means the rectified images
- are zoomed and shifted so that only valid pixels are visible (no black areas after rectification),
- alpha=1 means the rectified images are decimated and shifted so that all pixels from the original
- camera images are retained (no source image pixels are lost), and any intermediate value yields an
- intermediate result between those two extreme cases. The newImageSize argument sets the image
- resolution after rectification; the same size should be passed to #initUndistortRectifyMap (see the
- stereo_calib.cpp sample in the OpenCV samples directory), (0,0) (the default) keeps the original
- imageSize, and a larger value can help preserve details in the original image, especially when there
- is a big radial distortion. The optional output rectangles validPixROI1 and validPixROI2 delimit the
- regions of the rectified images where all the pixels are valid; if alpha=0, the ROIs cover the whole
- images, otherwise they are likely to be smaller (see the picture below).
- The function computes the rotation matrices for each camera that (virtually) make both camera image
- planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- as input. As output, it provides two rotation matrices and also two projection matrices in the new
- coordinates. The function distinguishes the following two cases:
- <ul>
- <li>
- <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \)
- where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- <ul>
- <li>
- <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \)
- where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- initialize the rectification map for each camera.
- See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- the corresponding image regions. This means that the images are well rectified, which is what most
- stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- their interiors are all valid pixels.
- </dd>
- </dl>
- </li>
- </ul>
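- As a hedged sketch (variable names carried over from the stereoCalibrate example earlier), the rotation
- R and translation T returned by stereoCalibrate can be passed to this overload to obtain the
- rectification transforms and the disparity-to-depth matrix Q:
- <pre>
- import org.opencv.calib3d.Calib3d;
- import org.opencv.core.Mat;
- // R and T come from stereoCalibrate / stereoCalibrateExtended; imageSize is the calibration image size.
- Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
- Calib3d.stereoRectify(cameraMatrix1, distCoeffs1,
-                       cameraMatrix2, distCoeffs2,
-                       imageSize, R, T,
-                       R1, R2, P1, P2, Q);
- // R1/P1 and R2/P2 can then be passed to initUndistortRectifyMap (one map pair per camera)
- // and applied with remap to obtain row-aligned rectified image pairs.
- </pre>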
- <a name="stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectify</h4>
- <pre>public static void stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags)</pre>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix1</code> - First camera intrinsic matrix.</dd>
- <dd><code>distCoeffs1</code> - First camera distortion parameters.</dd>
- <dd><code>cameraMatrix2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>distCoeffs2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>T</code> - Translation vector from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- brings points given in the unrectified first camera's coordinate system to points in the rectified
- first camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified first camera's coordinate system to the rectified first camera's coordinate system.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- brings points given in the unrectified second camera's coordinate system to points in the rectified
- second camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified second camera's coordinate system to the rectified second camera's coordinate system.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified first camera's image.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified second camera's image.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. And if the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.
- In this overload the remaining optional arguments take their default values. The free scaling
- parameter alpha performs the default scaling when it is -1 or absent; otherwise it should be between
- 0 and 1: alpha=0 means the rectified images are zoomed and shifted so that only valid pixels are
- visible (no black areas after rectification), alpha=1 means the rectified images are decimated and
- shifted so that all pixels from the original camera images are retained (no source image pixels are
- lost), and any intermediate value yields an intermediate result between those two extreme cases.
- The newImageSize argument sets the image resolution after rectification; the same size should be
- passed to #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory),
- (0,0) (the default) keeps the original imageSize, and a larger value can help preserve details in the
- original image, especially when there is a big radial distortion. The optional output rectangles
- validPixROI1 and validPixROI2 delimit the regions of the rectified images where all the pixels are
- valid; if alpha=0, the ROIs cover the whole images, otherwise they are likely to be smaller (see the
- picture below).
- The function computes the rotation matrices for each camera that (virtually) make both camera image
- planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- as input. As output, it provides two rotation matrices and also two projection matrices in the new
- coordinates. The function distinguishes the following two cases:
- <ul>
- <li>
- <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \)
- where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- <ul>
- <li>
- <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \)
- where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- initialize the rectification map for each camera.
- See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- the corresponding image regions. This means that the images are well rectified, which is what most
- stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- their interiors are all valid pixels.
- </dd>
- </dl>
- </li>
- </ul>
- <a name="stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectify</h4>
- <pre>public static void stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha)</pre>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix1</code> - First camera intrinsic matrix.</dd>
- <dd><code>distCoeffs1</code> - First camera distortion parameters.</dd>
- <dd><code>cameraMatrix2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>distCoeffs2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>T</code> - Translation vector from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- brings points given in the unrectified first camera's coordinate system to points in the rectified
- first camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified first camera's coordinate system to the rectified first camera's coordinate system.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- brings points given in the unrectified second camera's coordinate system to points in the rectified
- second camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified second camera's coordinate system to the rectified second camera's coordinate system.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified first camera's image.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified second camera's image.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. And if the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>alpha</code> - Free scaling parameter. If it is -1 or absent, the function performs the default
- scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- images are zoomed and shifted so that only valid pixels are visible (no black areas after
- rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
- pixels from the original images from the cameras are retained in the rectified images (no source
- image pixels are lost). Any intermediate value yields an intermediate result between
- those two extreme cases.
- In this overload the remaining optional arguments take their default values. The newImageSize
- argument sets the image resolution after rectification; the same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory), (0,0)
- (the default) keeps the original imageSize, and a larger value can help preserve details in the
- original image, especially when there is a big radial distortion. The optional output rectangles
- validPixROI1 and validPixROI2 delimit the regions of the rectified images where all the pixels are
- valid; if alpha=0, the ROIs cover the whole images, otherwise they are likely to be smaller (see the
- picture below).
- The function computes the rotation matrices for each camera that (virtually) make both camera image
- planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- as input. As output, it provides two rotation matrices and also two projection matrices in the new
- coordinates. The function distinguishes the following two cases:
- <ul>
- <li>
- <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \)
- where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- <ul>
- <li>
- <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \)
- where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- initialize the rectification map for each camera.
- See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- the corresponding image regions. This means that the images are well rectified, which is what most
- stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- their interiors are all valid pixels.
- </dd>
- </dl>
- </li>
- </ul>
- <a name="stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-org.opencv.core.Size-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectify</h4>
- <pre>public static void stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize)</pre>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix1</code> - First camera intrinsic matrix.</dd>
- <dd><code>distCoeffs1</code> - First camera distortion parameters.</dd>
- <dd><code>cameraMatrix2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>distCoeffs2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>T</code> - Translation vector from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- brings points given in the unrectified first camera's coordinate system to points in the rectified
- first camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified first camera's coordinate system to the rectified first camera's coordinate system.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- brings points given in the unrectified second camera's coordinate system to points in the rectified
- second camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified second camera's coordinate system to the rectified second camera's coordinate system.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified first camera's image.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified second camera's image.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. And if the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>alpha</code> - Free scaling parameter. If it is -1 or absent, the function performs the default
- scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- images are zoomed and shifted so that only valid pixels are visible (no black areas after
- rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
- pixels from the original images from the cameras are retained in the rectified images (no source
- image pixels are lost). Any intermediate value yields an intermediate result between
- those two extreme cases.</dd>
- <dd><code>newImageSize</code> - New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
- preserve details in the original image, especially when there is a big radial distortion.
- The optional output rectangles validPixROI1 and validPixROI2 delimit the regions of the rectified
- images where all the pixels are valid; if alpha=0, the ROIs cover the whole images, otherwise they
- are likely to be smaller (see the picture below).
- The function computes the rotation matrices for each camera that (virtually) make both camera image
- planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- as input. As output, it provides two rotation matrices and also two projection matrices in the new
- coordinates. The function distinguishes the following two cases:
- <ul>
- <li>
- <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \)
- where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- <ul>
- <li>
- <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \)
- where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- initialize the rectification map for each camera.
- See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- the corresponding image regions. This means that the images are well rectified, which is what most
- stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- their interiors are all valid pixels.
- </dd>
- </dl>
- </li>
- </ul>
- <a name="stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-org.opencv.core.Size-org.opencv.core.Rect-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectify</h4>
- <pre>public static void stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI1)</pre>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix1</code> - First camera intrinsic matrix.</dd>
- <dd><code>distCoeffs1</code> - First camera distortion parameters.</dd>
- <dd><code>cameraMatrix2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>distCoeffs2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>T</code> - Translation vector from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- brings points given in the unrectified first camera's coordinate system to points in the rectified
- first camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified first camera's coordinate system to the rectified first camera's coordinate system.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- brings points given in the unrectified second camera's coordinate system to points in the rectified
- second camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified second camera's coordinate system to the rectified second camera's coordinate system.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified first camera's image.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified second camera's image.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. And if the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>alpha</code> - Free scaling parameter. If it is -1 or absent, the function performs the default
- scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- images are zoomed and shifted so that only valid pixels are visible (no black areas after
- rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
- pixels from the original images from the cameras are retained in the rectified images (no source
- image pixels are lost). Any intermediate value yields an intermediate result between
- those two extreme cases.</dd>
- <dd><code>newImageSize</code> - New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
- preserve details in the original image, especially when there is a big radial distortion.</dd>
- <dd><code>validPixROI1</code> - Optional output rectangle inside the rectified first camera's image where all
- the pixels are valid. If alpha=0, the ROI covers the whole image. Otherwise, it is likely to be smaller
- (see the picture below). The second such rectangle (validPixROI2) is not returned by this overload.
- The function computes the rotation matrices for each camera that (virtually) make both camera image
- planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- as input. As output, it provides two rotation matrices and also two projection matrices in the new
- coordinates. The function distinguishes the following two cases:
- <ul>
- <li>
- <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \)
- where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- <ul>
- <li>
- <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \)
- where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- initialize the rectification map for each camera.
- See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- the corresponding image regions. This means that the images are well rectified, which is what most
- stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- their interiors are all valid pixels.
- </dd>
- </dl>
- </li>
- </ul>
- <a name="stereoRectify-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-int-double-org.opencv.core.Size-org.opencv.core.Rect-org.opencv.core.Rect-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectify</h4>
- <pre>public static void stereoRectify(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs2,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imageSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> T,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> Q,
- int flags,
- double alpha,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> newImageSize,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI1,
- <a href="../../../org/opencv/core/Rect.html" title="class in org.opencv.core">Rect</a> validPixROI2)</pre>
- <div class="block">Computes rectification transforms for each head of a calibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>cameraMatrix1</code> - First camera intrinsic matrix.</dd>
- <dd><code>distCoeffs1</code> - First camera distortion parameters.</dd>
- <dd><code>cameraMatrix2</code> - Second camera intrinsic matrix.</dd>
- <dd><code>distCoeffs2</code> - Second camera distortion parameters.</dd>
- <dd><code>imageSize</code> - Size of the image used for stereo calibration.</dd>
- <dd><code>R</code> - Rotation matrix from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>T</code> - Translation vector from the coordinate system of the first camera to the second camera,
- see REF: stereoCalibrate.</dd>
- <dd><code>R1</code> - Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
- brings points given in the unrectified first camera's coordinate system to points in the rectified
- first camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified first camera's coordinate system to the rectified first camera's coordinate system.</dd>
- <dd><code>R2</code> - Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
- brings points given in the unrectified second camera's coordinate system to points in the rectified
- second camera's coordinate system. In more technical terms, it performs a change of basis from the
- unrectified second camera's coordinate system to the rectified second camera's coordinate system.</dd>
- <dd><code>P1</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified first camera's image.</dd>
- <dd><code>P2</code> - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera, i.e. it projects points given in the rectified first camera coordinate system into the
- rectified second camera's image.</dd>
- <dd><code>Q</code> - Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).</dd>
- <dd><code>flags</code> - Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. And if the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.</dd>
- <dd><code>alpha</code> - Free scaling parameter. If it is -1 or absent, the function performs the default
- scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
- images are zoomed and shifted so that only valid pixels are visible (no black areas after
- rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
- pixels from the original images from the cameras are retained in the rectified images (no source
- image pixels are lost). Any intermediate value yields an intermediate result between
- those two extreme cases.</dd>
- <dd><code>newImageSize</code> - New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
- preserve details in the original image, especially when there is a big radial distortion.</dd>
- <dd><code>validPixROI1</code> - Optional output rectangle inside the rectified first camera's image where all
- the pixels are valid. If alpha=0, the ROI covers the whole image. Otherwise, it is likely to be smaller
- (see the picture below).</dd>
- <dd><code>validPixROI2</code> - Optional output rectangle inside the rectified second camera's image where all
- the pixels are valid. If alpha=0, the ROI covers the whole image. Otherwise, it is likely to be smaller
- (see the picture below).
- The function computes the rotation matrices for each camera that (virtually) make both camera image
- planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
- the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
- as input. As output, it provides two rotation matrices and also two projection matrices in the new
- coordinates. The function distinguishes the following two cases:
- <ul>
- <li>
- <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \)
- where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- <ul>
- <li>
- <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
- </li>
- </ul>
- \(\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\)
- \(\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\)
- \(\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \)
- where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
- REF: CALIB_ZERO_DISPARITY is set.
- As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
- matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
- initialize the rectification map for each camera.
- See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
- the corresponding image regions. This means that the images are well rectified, which is what most
- stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
- their interiors are all valid pixels.
- </dd>
- </dl>
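- <p>For illustration, a minimal rectification pipeline built around this overload might look as follows
- (a sketch, assuming cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R and T come from
- #stereoCalibrate; <code>leftImage</code>, <code>rightImage</code> and <code>imageSize</code> are
- illustrative names, and org.opencv.core, org.opencv.calib3d.Calib3d and org.opencv.imgproc.Imgproc
- are assumed to be imported):</p>
- <pre>
- // Outputs of stereoRectify.
- Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
- Rect roi1 = new Rect(), roi2 = new Rect();
- Calib3d.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-                       imageSize, R, T, R1, R2, P1, P2, Q,
-                       Calib3d.CALIB_ZERO_DISPARITY, 0.0, imageSize, roi1, roi2);
-
- // Build the per-camera rectification maps and remap the original images.
- Mat map1x = new Mat(), map1y = new Mat(), map2x = new Mat(), map2y = new Mat();
- Calib3d.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize,
-                                 CvType.CV_32FC1, map1x, map1y);
- Calib3d.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize,
-                                 CvType.CV_32FC1, map2x, map2y);
- Mat rectLeft = new Mat(), rectRight = new Mat();
- Imgproc.remap(leftImage, rectLeft, map1x, map1y, Imgproc.INTER_LINEAR);
- Imgproc.remap(rightImage, rectRight, map2x, map2y, Imgproc.INTER_LINEAR);
- </pre>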
- </li>
- </ul>
- <a name="stereoRectifyUncalibrated-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectifyUncalibrated</h4>
- <pre>public static boolean stereoRectifyUncalibrated(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H2)</pre>
- <div class="block">Computes a rectification transform for an uncalibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of feature points in the first image.</dd>
- <dd><code>points2</code> - The corresponding points in the second image. The same formats as in
- #findFundamentalMat are supported.</dd>
- <dd><code>F</code> - Input fundamental matrix. It can be computed from the same set of point pairs using
- #findFundamentalMat .</dd>
- <dd><code>imgSize</code> - Size of the image.</dd>
- <dd><code>H1</code> - Output rectification homography matrix for the first image.</dd>
- <dd><code>H2</code> - Output rectification homography matrix for the second image.
- In this overload no outlier-filtering threshold is given; see the overload with the <code>threshold</code>
- parameter, where all the point pairs that do not comply with the epipolar geometry (that is, the points
- for which \(|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|>\texttt{threshold}\) )
- are rejected prior to computing the homographies.
- The function computes the rectification transformations without knowing intrinsic parameters of the
- cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
- related difference from #stereoRectify is that the function outputs not the rectification
- transformations in the object (3D) space, but the planar perspective transformations encoded by the
- homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
- <b>Note:</b>
- While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
- depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
- it would be better to correct it before computing the fundamental matrix and calling this
- function. For example, distortion coefficients can be estimated for each head of stereo camera
- separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
- just the point coordinates can be corrected with #undistortPoints .</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="stereoRectifyUncalibrated-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Size-org.opencv.core.Mat-org.opencv.core.Mat-double-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>stereoRectifyUncalibrated</h4>
- <pre>public static boolean stereoRectifyUncalibrated(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> F,
- <a href="../../../org/opencv/core/Size.html" title="class in org.opencv.core">Size</a> imgSize,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> H2,
- double threshold)</pre>
- <div class="block">Computes a rectification transform for an uncalibrated stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>points1</code> - Array of feature points in the first image.</dd>
- <dd><code>points2</code> - The corresponding points in the second image. The same formats as in
- #findFundamentalMat are supported.</dd>
- <dd><code>F</code> - Input fundamental matrix. It can be computed from the same set of point pairs using
- #findFundamentalMat .</dd>
- <dd><code>imgSize</code> - Size of the image.</dd>
- <dd><code>H1</code> - Output rectification homography matrix for the first image.</dd>
- <dd><code>H2</code> - Output rectification homography matrix for the second image.</dd>
- <dd><code>threshold</code> - Optional threshold used to filter out the outliers. If the parameter is greater
- than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
- for which \(|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|>\texttt{threshold}\) )
- are rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
- The function computes the rectification transformations without knowing intrinsic parameters of the
- cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
- related difference from #stereoRectify is that the function outputs not the rectification
- transformations in the object (3D) space, but the planar perspective transformations encoded by the
- homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
- <b>Note:</b>
- While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
- depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
- it would be better to correct it before computing the fundamental matrix and calling this
- function. For example, distortion coefficients can be estimated for each head of stereo camera
- separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
- just the point coordinates can be corrected with #undistortPoints .</dd>
- <dt><span class="returnLabel">Returns:</span></dt>
- <dd>automatically generated</dd>
- </dl>
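- <p>A minimal usage sketch (illustrative; <code>points1</code> and <code>points2</code> are assumed to be
- matched MatOfPoint2f correspondences and <code>img1</code>/<code>img2</code> the corresponding images):</p>
- <pre>
- // Estimate the fundamental matrix from the same correspondences, then rectify without calibration.
- Mat F = Calib3d.findFundamentalMat(points1, points2, Calib3d.FM_RANSAC, 3.0, 0.99);
- Mat H1 = new Mat(), H2 = new Mat();
- boolean ok = Calib3d.stereoRectifyUncalibrated(points1, points2, F, img1.size(), H1, H2, 5.0);
- if (ok) {
-     // H1 and H2 are planar perspective transforms; warp the images with them.
-     Mat rect1 = new Mat(), rect2 = new Mat();
-     Imgproc.warpPerspective(img1, rect1, H1, img1.size());
-     Imgproc.warpPerspective(img2, rect2, H2, img2.size());
- }
- </pre>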
- </li>
- </ul>
- <a name="triangulatePoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>triangulatePoints</h4>
- <pre>public static void triangulatePoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatr1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projMatr2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projPoints1,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> projPoints2,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> points4D)</pre>
- <div class="block">This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
- their observations with a stereo camera.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>projMatr1</code> - 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
- given in the world's coordinate system into the first image.</dd>
- <dd><code>projMatr2</code> - 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
- given in the world's coordinate system into the second image.</dd>
- <dd><code>projPoints1</code> - 2xN array of feature points in the first image. In the C++ version,
- it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.</dd>
- <dd><code>projPoints2</code> - 2xN array of corresponding points in the second image. In the C++
- version, it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.</dd>
- <dd><code>points4D</code> - 4xN array of reconstructed points in homogeneous coordinates. These points are
- returned in the world's coordinate system.
- <b>Note:</b>
- Keep in mind that all input data should be of float type in order for this function to work.
- <b>Note:</b>
- If the projection matrices from REF: stereoRectify are used, then the returned points are
- represented in the first camera's rectified coordinate system.
- SEE:
- reprojectImageTo3D</dd>
- </dl>
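- <p>A minimal usage sketch (illustrative; <code>P1</code> and <code>P2</code> are assumed to be the projection
- matrices returned by #stereoRectify, and <code>pts1</code>/<code>pts2</code> 2xN CV_32F matrices of matching
- pixel coordinates in the rectified images):</p>
- <pre>
- Mat points4D = new Mat();
- Calib3d.triangulatePoints(P1, P2, pts1, pts2, points4D);
-
- // points4D is 4xN in homogeneous coordinates; divide out the fourth coordinate to get 3D points.
- Mat points3D = new Mat();
- Calib3d.convertPointsFromHomogeneous(points4D.t(), points3D);
- </pre>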
- </li>
- </ul>
- <a name="undistort-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistort</h4>
- <pre>public static void undistort(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs)</pre>
- <div class="block">Transforms an image to compensate for lens distortion.
- The function transforms an image to compensate for radial and tangential lens distortion.
- The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
- (with bilinear interpolation). See the former function for details of the transformation being
- performed.
- Those pixels in the destination image for which there are no corresponding pixels in the source
- image are filled with zeros (black color).
- A particular subset of the source image that will be visible in the corrected image can be regulated
- by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
- newCameraMatrix depending on your requirements.
- The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
- the resolution of images is different from the resolution used at the calibration stage, \(f_x,
- f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
- the same.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Input (distorted) image.</dd>
- <dd><code>dst</code> - Output (corrected) image that has the same size and type as src .</dd>
- <dd><code>cameraMatrix</code> - Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- In this overload the output uses cameraMatrix itself as the new camera matrix; use the overload with
- newCameraMatrix to additionally scale and shift the result.</dd>
- </dl>
- </li>
- </ul>
- <a name="undistort-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistort</h4>
- <pre>public static void undistort(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> newCameraMatrix)</pre>
- <div class="block">Transforms an image to compensate for lens distortion.
- The function transforms an image to compensate for radial and tangential lens distortion.
- The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
- (with bilinear interpolation). See the former function for details of the transformation being
- performed.
- Those pixels in the destination image for which there are no corresponding pixels in the source
- image are filled with zeros (black color).
- A particular subset of the source image that will be visible in the corrected image can be regulated
- by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
- newCameraMatrix depending on your requirements.
- The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
- the resolution of images is different from the resolution used at the calibration stage, \(f_x,
- f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
- the same.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Input (distorted) image.</dd>
- <dd><code>dst</code> - Output (corrected) image that has the same size and type as src .</dd>
- <dd><code>cameraMatrix</code> - Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>newCameraMatrix</code> - Camera matrix of the distorted image. By default, it is the same as
- cameraMatrix but you may additionally scale and shift the result by using a different matrix.</dd>
- </dl>
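- <p>A minimal usage sketch (illustrative; <code>img</code>, <code>cameraMatrix</code> and <code>distCoeffs</code>
- are assumed to come from a prior #calibrateCamera run):</p>
- <pre>
- // Keep all source pixels (alpha = 1) by computing a new camera matrix first.
- Mat newCameraMatrix = Calib3d.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, img.size(), 1.0);
- Mat undistorted = new Mat();
- Calib3d.undistort(img, undistorted, cameraMatrix, distCoeffs, newCameraMatrix);
- </pre>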
- </li>
- </ul>
- <a name="undistortImagePoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistortImagePoints</h4>
- <pre>public static void undistortImagePoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs)</pre>
- <div class="block">Compute undistorted image points position</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Observed point positions, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
- CV_64FC2) (or vector&lt;Point2f&gt;).</dd>
- <dd><code>dst</code> - Output undistorted point positions (1xN/Nx1 2-channel or vector&lt;Point2f&gt;).</dd>
- <dd><code>cameraMatrix</code> - Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Distortion coefficients.</dd>
- </dl>
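- <p>A minimal usage sketch (illustrative; <code>observed</code> is assumed to be an Nx1 2-channel CV_32FC2
- Mat of detected pixel coordinates):</p>
- <pre>
- Mat undistorted = new Mat();
- Calib3d.undistortImagePoints(observed, undistorted, cameraMatrix, distCoeffs);
- // The result stays in pixel coordinates, unlike undistortPoints with empty R and P,
- // which returns normalized coordinates.
- </pre>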
- </li>
- </ul>
- <a name="undistortImagePoints-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistortImagePoints</h4>
- <pre>public static void undistortImagePoints(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> arg1)</pre>
- <div class="block">Compute undistorted image points position</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Observed point positions, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
- CV_64FC2) (or vector&lt;Point2f&gt;).</dd>
- <dd><code>dst</code> - Output undistorted point positions (1xN/Nx1 2-channel or vector&lt;Point2f&gt;).</dd>
- <dd><code>cameraMatrix</code> - Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Distortion coefficients.</dd>
- <dd><code>arg1</code> - automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="undistortPoints-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistortPoints</h4>
- <pre>public static void undistortPoints(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> src,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs)</pre>
- <div class="block">Computes the ideal point coordinates from the observed point coordinates.
- The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
- sparse set of points instead of a raster image. Also the function performs a reverse transformation
- to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
- planar object, it does, up to a translation vector, if the proper R is specified.
- For each observed point coordinate \((u, v)\) the function computes:
- \(
- \begin{array}{l}
- x^{"} \leftarrow (u - c_x)/f_x \\
- y^{"} \leftarrow (v - c_y)/f_y \\
- (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
- {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- x \leftarrow X/W \\
- y \leftarrow Y/W \\
- \text{only performed if P is specified:} \\
- u' \leftarrow x {f'}_x + {c'}_x \\
- v' \leftarrow y {f'}_y + {c'}_y
- \end{array}
- \)
- where *undistort* is an approximate iterative algorithm that estimates the normalized original
- point coordinates out of the normalized distorted point coordinates ("normalized" means that the
- coordinates do not depend on the camera matrix).
- The function can be used for both a stereo camera head or a monocular camera (when R is empty).</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
- vector&lt;Point2f&gt;).</dd>
- <dd><code>dst</code> - Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt;) after undistortion and reverse perspective
- transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.</dd>
- <dd><code>cameraMatrix</code> - Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
- In this overload neither R nor P is given: the identity rectification transformation and the identity
- new camera matrix are used, so dst contains normalized point coordinates.</dd>
- </dl>
- </li>
- </ul>
- <a name="undistortPoints-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistortPoints</h4>
- <pre>public static void undistortPoints(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> src,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R)</pre>
- <div class="block">Computes the ideal point coordinates from the observed point coordinates.
- The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
- sparse set of points instead of a raster image. Also the function performs a reverse transformation
- to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
- planar object, it does, up to a translation vector, if the proper R is specified.
- For each observed point coordinate \((u, v)\) the function computes:
- \(
- \begin{array}{l}
- x^{"} \leftarrow (u - c_x)/f_x \\
- y^{"} \leftarrow (v - c_y)/f_y \\
- (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
- {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- x \leftarrow X/W \\
- y \leftarrow Y/W \\
- \text{only performed if P is specified:} \\
- u' \leftarrow x {f'}_x + {c'}_x \\
- v' \leftarrow y {f'}_y + {c'}_y
- \end{array}
- \)
- where *undistort* is an approximate iterative algorithm that estimates the normalized original
- point coordinates out of the normalized distorted point coordinates ("normalized" means that the
- coordinates do not depend on the camera matrix).
- The function can be used for both a stereo camera head or a monocular camera (when R is empty).</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
- vector&lt;Point2f&gt;).</dd>
- <dd><code>dst</code> - Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt;) after undistortion and reverse perspective
- transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.</dd>
- <dd><code>cameraMatrix</code> - Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>R</code> - Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
- #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
- In this overload the new camera matrix P is not given, so the identity new camera matrix is used and
- dst contains normalized point coordinates.</dd>
- </dl>
- </li>
- </ul>
- <a name="undistortPoints-org.opencv.core.MatOfPoint2f-org.opencv.core.MatOfPoint2f-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistortPoints</h4>
- <pre>public static void undistortPoints(<a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> src,
- <a href="../../../org/opencv/core/MatOfPoint2f.html" title="class in org.opencv.core">MatOfPoint2f</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P)</pre>
- <div class="block">Computes the ideal point coordinates from the observed point coordinates.
- The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
- sparse set of points instead of a raster image. Also the function performs a reverse transformation
- to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
- planar object, it does, up to a translation vector, if the proper R is specified.
- For each observed point coordinate \((u, v)\) the function computes:
- \(
- \begin{array}{l}
- x^{"} \leftarrow (u - c_x)/f_x \\
- y^{"} \leftarrow (v - c_y)/f_y \\
- (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
- {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
- x \leftarrow X/W \\
- y \leftarrow Y/W \\
- \text{only performed if P is specified:} \\
- u' \leftarrow x {f'}_x + {c'}_x \\
- v' \leftarrow y {f'}_y + {c'}_y
- \end{array}
- \)
- where *undistort* is an approximate iterative algorithm that estimates the normalized original
- point coordinates out of the normalized distorted point coordinates ("normalized" means that the
- coordinates do not depend on the camera matrix).
- The function can be used for both a stereo camera head or a monocular camera (when R is empty).</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
- vector&lt;Point2f&gt;).</dd>
- <dd><code>dst</code> - Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt;) after undistortion and reverse perspective
- transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.</dd>
- <dd><code>cameraMatrix</code> - Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .</dd>
- <dd><code>distCoeffs</code> - Input vector of distortion coefficients
- \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
- of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.</dd>
- <dd><code>R</code> - Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
- #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.</dd>
- <dd><code>P</code> - New camera matrix (3x3) or new projection matrix (3x4) \(\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\). P1 or P2 computed by
- #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.</dd>
- </dl>
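- <p>A minimal usage sketch (illustrative; <code>rawPoints</code> is an assumed MatOfPoint2f of detections in
- the first camera, and <code>R1</code>/<code>P1</code> come from #stereoRectify):</p>
- <pre>
- // Map raw detections into the rectified first camera's image plane.
- MatOfPoint2f rectified = new MatOfPoint2f();
- Calib3d.undistortPoints(rawPoints, rectified, cameraMatrix1, distCoeffs1, R1, P1);
-
- // Without R and P the same call returns normalized (camera-matrix independent) coordinates.
- MatOfPoint2f normalized = new MatOfPoint2f();
- Calib3d.undistortPoints(rawPoints, normalized, cameraMatrix1, distCoeffs1);
- </pre>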
- </li>
- </ul>
- <a name="undistortPointsIter-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.Mat-org.opencv.core.TermCriteria-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>undistortPointsIter</h4>
- <pre>public static void undistortPointsIter(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> src,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> dst,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cameraMatrix,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> distCoeffs,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> R,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> P,
- <a href="../../../org/opencv/core/TermCriteria.html" title="class in org.opencv.core">TermCriteria</a> criteria)</pre>
- <div class="block"><b>Note:</b> Default version of #undistortPoints does 5 iterations to compute undistorted points.</div>
- <dl>
- <dt><span class="paramLabel">Parameters:</span></dt>
- <dd><code>src</code> - automatically generated</dd>
- <dd><code>dst</code> - automatically generated</dd>
- <dd><code>cameraMatrix</code> - automatically generated</dd>
- <dd><code>distCoeffs</code> - automatically generated</dd>
- <dd><code>R</code> - automatically generated</dd>
- <dd><code>P</code> - automatically generated</dd>
- <dd><code>criteria</code> - automatically generated</dd>
- </dl>
- </li>
- </ul>
- <a name="validateDisparity-org.opencv.core.Mat-org.opencv.core.Mat-int-int-">
- <!-- -->
- </a>
- <ul class="blockList">
- <li class="blockList">
- <h4>validateDisparity</h4>
- <pre>public static void validateDisparity(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cost,
- int minDisparity,
- int numberOfDisparities)</pre>
- </li>
- </ul>
- <a name="validateDisparity-org.opencv.core.Mat-org.opencv.core.Mat-int-int-int-">
- <!-- -->
- </a>
- <ul class="blockListLast">
- <li class="blockList">
- <h4>validateDisparity</h4>
- <pre>public static void validateDisparity(<a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> disparity,
- <a href="../../../org/opencv/core/Mat.html" title="class in org.opencv.core">Mat</a> cost,
- int minDisparity,
- int numberOfDisparities,
- int disp12MaxDisp)</pre>
- </li>
- </ul>
- </li>
- </ul>
- </li>
- </ul>
- </div>
- </div>
- <!-- ========= END OF CLASS DATA ========= -->
- <!-- ======= START OF BOTTOM NAVBAR ====== -->
- <div class="bottomNav"><a name="navbar.bottom">
- <!-- -->
- </a>
- <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
- <a name="navbar.bottom.firstrow">
- <!-- -->
- </a>
- <ul class="navList" title="Navigation">
- <li><a href="../../../overview-summary.html">Overview</a></li>
- <li><a href="package-summary.html">Package</a></li>
- <li class="navBarCell1Rev">Class</li>
- <li><a href="package-tree.html">Tree</a></li>
- <li><a href="../../../index-all.html">Index</a></li>
- <li><a href="../../../help-doc.html">Help</a></li>
- </ul>
- <div class="aboutLanguage">
- <script>
- var url = window.location.href;
- var pos = url.lastIndexOf('/javadoc/');
- url = pos >= 0 ? (url.substring(0, pos) + '/javadoc/mymath.js') : (window.location.origin + '/mymath.js');
- var script = document.createElement('script');
- script.src = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML,' + url;
- document.getElementsByTagName('head')[0].appendChild(script);
- </script>
- </div>
- </div>
- <div class="subNav">
- <ul class="navList">
- <li>Prev Class</li>
- <li><a href="../../../org/opencv/calib3d/StereoBM.html" title="class in org.opencv.calib3d"><span class="typeNameLink">Next Class</span></a></li>
- </ul>
- <ul class="navList">
- <li><a href="../../../index.html?org/opencv/calib3d/Calib3d.html" target="_top">Frames</a></li>
- <li><a href="Calib3d.html" target="_top">No Frames</a></li>
- </ul>
- <ul class="navList" id="allclasses_navbar_bottom">
- <li><a href="../../../allclasses-noframe.html">All Classes</a></li>
- </ul>
- <div>
- <script type="text/javascript"><!--
- allClassesLink = document.getElementById("allclasses_navbar_bottom");
- if(window==top) {
- allClassesLink.style.display = "block";
- }
- else {
- allClassesLink.style.display = "none";
- }
- //-->
- </script>
- </div>
- <div>
- <ul class="subNavList">
- <li>Summary: </li>
- <li>Nested | </li>
- <li><a href="#field.summary">Field</a> | </li>
- <li><a href="#constructor.summary">Constr</a> | </li>
- <li><a href="#method.summary">Method</a></li>
- </ul>
- <ul class="subNavList">
- <li>Detail: </li>
- <li><a href="#field.detail">Field</a> | </li>
- <li><a href="#constructor.detail">Constr</a> | </li>
- <li><a href="#method.detail">Method</a></li>
- </ul>
- </div>
- <a name="skip.navbar.bottom">
- <!-- -->
- </a></div>
- <!-- ======== END OF BOTTOM NAVBAR ======= -->
- <p class="legalCopy"><small>Generated on 2023-06-28 12:47:21 / OpenCV 4.8.0</small></p>
- </body>
- </html>