# play.py -- evaluation / plotting playground for the loco localization thesis experiments.
  1. import datetime
  2. import sys
  3. import time
  4. from collections import defaultdict, OrderedDict
  5. import logging
  6. from random import randint, random
  7. import socket
  8. import cPickle as pickle
  9. import numpy as np
  10. import matplotlib.pyplot as plt
  11. import pylab as lab
  12. from scipy.interpolate import griddata
  13. import scipy
  14. from mpl_toolkits.axes_grid1 import make_axes_locatable
  15. from matplotlib.ticker import FuncFormatter, FixedLocator
  16. from utils import cython_annotate
  17. from utils.xml import TreeToString, StringToTree, FileToTree
  18. from utils.path import path
  19. from lws import serve
  20. from lws import optimize
  21. from lws import evaluate
# Module-level switch: turn on the stair-handling workaround in lws.evaluate.
# Set immediately after importing the module, before Evaluator is imported below --
# presumably the flag must be in place before any evaluation code runs (confirm).
evaluate.STAIR_HACK = True
  23. from lws.evaluate import Evaluator
  24. from lws.localize import Environment, HMMLocalizer, LMSELocalizer, Measurements
# Host-specific filesystem layout: choose data, result and map paths by hostname.
if socket.gethostname() == 'freimann':
    AP_DATA = path('/home/rothe/loco/apdata/')
    THESIS_RESULTS = path('/home/rothe/loco/thesis_results')
    THESIS_FIGURES = path('/home/rothe/loco/thesis_results')
    LWS_TMP = path('/home/rothe/loco/lws-instance')
    OBJFILE = path('/home/rothe/loco/maps/UMIC/umic.obj')
    LOCFILE = path('/home/rothe/loco/maps/UMIC/experiment/locations.txt')
    LWS_INI = r'/home/rothe/loco/lws/instance_nostromo/lws_local.ini'
elif socket.gethostname() == 'nostromo':
    # NOTE(review): LWS_INI is not defined on this host, so setupEnv(...,
    # da_mode='generate') would raise NameError here -- confirm intended.
    AP_DATA = path('/home/dirk/lws-instance/tmp/apdata/')
    THESIS_RESULTS = path('/home/dirk/thesis_results')
    THESIS_FIGURES = path('/home/dirk/thesis_results')
    LWS_TMP = path('/home/dirk/lws-instance/tmp')
    OBJFILE = path('/home/dirk/loco/maps/UMIC/umic.obj')
    LOCFILE = path('/home/dirk/loco/maps/UMIC/experiment/locations.txt')
    # NOTE(review): TMP is only bound in this branch; other hosts never define it.
    TMP = LWS_TMP
else:
    # Default: Windows development machine.
    AP_DATA = path('D:/loco-dev/dirk/_tmp/apdata/')
    THESIS_RESULTS = path(r'D:\loco-dev\dirk\thesis\results')
    THESIS_FIGURES = path(r'D:\loco-dev\dirk\thesis\figures')
    LWS_TMP = path(r'D:\loco-dev\dirk\tmp')
    OBJFILE = path(r'D:\loco-dev\maps\UMIC\umic.obj')
    LOCFILE = path(r'D:\loco-dev\maps\UMIC\experiment\locations.txt')
    LWS_INI = r'D:\loco-dev\dirk\lws\instance_nostromo\lws_local.ini'
# Walking paths used in the experiments: path name -> ordered list of location ids.
# '*_r' entries are the same path walked in reverse; 'still_*' entries stand at one
# location (the id is repeated).
CONFIGURED_PATHS = {u'stairs_upward': [u'16', u'17', u'27', u'26', u'11', u'7', u'24', u'25', u'72', u'60', u'28'], u'og1_long_straight': [u'15', u'13', u'11', u'7', u'6', u'8', u'9'], u'og1_classic': [u'4', u'3', u'71', u'6', u'8', u'9', u'10', u'30'], u'og1_room_change': [u'73', u'73', u'1', u'71', u'6', u'74', u'75', u'31', u'31'], u'og1_eg': [u'9', u'8', u'71', u'7', u'11', u'26', u'27', u'17', u'16', u'35', u'39'], u'og1_long_rooms': [u'29', u'98', u'9', u'8', u'6', u'7', u'11', u'13', u'94', u'96', u'97', u'95', u'94', u'13', u'14', u'14'], u'eg_room_change': [u'76', u'100', u'22', u'23', u'102', u'84', u'84', u'102', u'23', u'101', u'86', u'86'], u'og1_eg_right': [u'4', u'3', u'71', u'7', u'11', u'26', u'27', u'17', u'18', u'23', u'99', u'81', u'80'], u'still_73': [u'73', u'73'], u'still_2': [u'2', u'2'], u'still_14': [u'14', u'14'], u'still_26': [u'26', u'26'], u'still_90': [u'90', u'90'], u'still_60': [u'60', u'60'], u'still_19': [u'19', u'19'], u'still_35': [u'35', u'35'], u'still_16': [u'16', u'16'], u'still_64': [u'64', u'64'], u'still_95': [u'95', u'95'], u'stairs_upward_r': [u'28', u'60', u'72', u'25', u'24', u'7', u'11', u'26', u'27', u'17', u'16'], u'og1_long_straight_r': [u'9', u'8', u'6', u'7', u'11', u'13', u'15'], u'og1_classic_r': [u'30', u'10', u'9', u'8', u'6', u'71', u'3', u'4'], u'og1_room_change_r': [u'31', u'31', u'75', u'74', u'6', u'71', u'1', u'73', u'73'], u'og1_eg_r': [u'39', u'35', u'16', u'17', u'27', u'26', u'11', u'7', u'71', u'8', u'9'], u'og1_long_rooms_r': [u'14', u'14', u'13', u'94', u'95', u'97', u'96', u'94', u'13', u'11', u'7', u'6', u'8', u'9', u'98', u'29'], u'eg_room_change_r': [u'86', u'86', u'101', u'23', u'102', u'84', u'84', u'102', u'23', u'22', u'100', u'76'], u'og1_eg_right_r': [u'80', u'81', u'99', u'23', u'18', u'17', u'27', u'26', u'11', u'7', u'71', u'3', u'4']}
def setupEnv(optrun, device, da_mode="none"):
    """Build the lws.localize.Environment for one optimisation run and device.

    optrun  -- name of the optimisation run; selects the per-AP data files
               under AP_DATA/<optrun>/.
    device  -- device identifier used to look up device-adaptation (DA) values.
    da_mode -- 'none': no DA values; 'generate': (re)optimize DA values from
               recorded measurements; 'optrun': use the stored table keyed by
               optrun; any other value: use it directly as the key into the
               stored DA table.
    Returns the constructed Environment.
    """
    locfile = LOCFILE
    objfile = OBJFILE
    t = time.time()
    # Access points: (name, x, y, z) -- presumably map coordinates in metres (confirm).
    aps = [
        ('104', 44.8, 15.0, -1.0),
        ('106', 36.9, 7.5, 5.1),
        ('107', 35.3, 14.2, 1.0),
        ('108', 36.1, 4.4, 1.0),
        ('109p2', 51.7, 3.0, 1.0),
        ('110p2', 45.8, 15.0, 3.2),
        ('114', 36.6, 6.5, -1.5),
        ('171', 36.5, 9.55, 2.0),
        ('172', 23.4, 5.8, 2.6),
        ('edueg031', 41.6, 7.4, -1.0),
        ('eduegcorr1', 12.6, 4.9, -0.75),
        ('eduegcorr2', 31.6, 11.3, -1.0),
        ('eduegdemo', 28.7, 14.6, -2.6),
        ('eduog1108', 11.6, 6.4, 2.0),
        ('eduog1server', 48.5, 6.4, 2.0),
        ('eduog2206', 5.7, 6.35, 5.2),
        ('eduog2corr', 32.0, 11.2, 5.5),
        ('eduog2kitchen', 19.2, 9.2, 5.5),
        ('freya', 32.6, 10.5, 1.0),
        ('gaia', 45.8, 10.6, 1.0),
        ('hyperion', 39.3, 1.0, 0.1),
        ('iris', 9.8, 10.4, 0.1),
    ]
    # Stored device-adaptation tables: key -> device -> [(rssi, correction), ...].
    dav = defaultdict(lambda: defaultdict(list))
    dav.update({
        # wave proagation trained
        'all_5': {'iconia': [(-90.0, 0.1), (-80.0, -4.8), (-70.0, -1.1), (-60.0, -1.4), (-50.0, 4.6), (-40.0, 8.6), (-30.0, 4.8), (0.0, -0.7)]},
        # wave proagation trained
        'all_32': {'iconia': [(-95.0, 6.4), (-92.0, 1.8), (-89.0, -0.1), (-86.0, -10.3), (-83.0, -4.1), (-80.0, -7.4), (-77.0, -8.8), (-74.0, -8.0), (-71.0, -7.4), (-68.0, -7.8), (-65.0, -7.3), (-62.0, -8.2), (-59.0, -5.4), (-56.0, -6.4), (-53.0, -7.2), (-50.0, -3.8), (-47.0, -5.8), (-44.0, 1.1), (-41.0, -1.8), (-38.0, -2.7), (-35.0, 0.2), (-32.0, -4.3), (-29.0, 6.8), (-26.0, -6.9), (-23.0, -4.9)]},
        #~ retrained da 'all_32': {'iconia': [(-95.0, 5.669932196007285), (-92.0, 1.276227839123189), (-89.0, -0.3662046945563398), (-86.0, -3.910946672699387), (-83.0, -3.8030051754485314), (-80.0, -6.492840168620569), (-77.0, -7.081285243423606), (-74.0, -7.7933713944929615), (-71.0, -7.865880407116174), (-68.0, -6.970590898308797), (-65.0, -7.5431398071828095), (-62.0, -7.535536597051766), (-59.0, -7.540053462598711), (-56.0, -7.242611183340358), (-53.0, -7.412929069196467), (-50.0, -5.847772074371091), (-47.0, -7.048747794215609), (-44.0, -7.730967657968016), (-41.0, 1.194441382213425), (-38.0, -1.3817805726231445), (-35.0, 3.41487719361011), (-32.0, -6.450405842872062), (-29.0, -1.2614925411163653), (-26.0, 1.4683801704451258), (-23.0, -0.5648552206848222)]},
        # independently trained
        'iconia.highres_3': {'iconia': [(-95.0, 5.356757230730603), (-92.0, 5.989903962422883), (-89.0, 2.081951525948435), (-86.0, 0.9588198151073739), (-83.0, -1.1124684702684822), (-80.0, -2.2060971911363185), (-77.0, -1.144559292083103), (-74.0, -1.2737037746287196), (-71.0, -0.7898499867132481), (-68.0, -0.4132733920785634), (-65.0, -0.219021367593064), (-62.0, 0.22678720895050503), (-59.0, -0.4790131304298816), (-56.0, 0.2520500585382777), (-53.0, -0.36286191361626824), (-50.0, 2.6737368232446266), (-47.0, 5.298159624613441), (-44.0, 5.605423446284294), (-41.0, 3.12608476637685), (-38.0, -2.8778400578617354), (-35.0, -3.4049649437915774), (-32.0, 2.41610282901362), (-29.0, 7.067256082518609), (-26.0, 5.231612522065486), (-23.0, 3.89199737215653)]},
        'iconia_1': {'iconia':
            #~ [(-95.0, -7.302880873503025), (-92.0, -6.694758275178181), (-89.0, 1.280934033820936), (-86.0, 0.2661560308241518), (-83.0, -1.571802042634903), (-80.0, -1.4792074535005466), (-77.0, -1.225551755750758), (-74.0, -1.6632332646292913), (-71.0, -2.3226899888806933), (-68.0, -3.217047979648854), (-65.0, -2.8368001035981942), (-62.0, 0.4547005033193087), (-59.0, -3.244675166899406), (-56.0, -0.5994222971766014), (-53.0, -1.6562485454932854), (-50.0, -2.3886916113837064), (-47.0, -2.2793905050446837), (-44.0, 4.383520219936498), (-41.0, 1.957141308155586), (-38.0, -2.693326949656976), (-35.0, 2.854732592997577), (-32.0, 4.379933428367942), (-29.0, 0.9737578282863675), (-26.0, 3.346395931141383), (-23.0, 0.014800485462740487)]
            [(-95.0, -6.433512328802718), (-92.0, -7.993010242814167), (-89.0, 1.3649206075960643), (-86.0, 0.34203843661928107), (-83.0, -1.8761612239248273), (-80.0, -1.5800910197184097), (-77.0, -1.1185837850999842), (-74.0, -1.643928026263407), (-71.0, -2.226572938639837), (-68.0, -3.232398498289264), (-65.0, -2.8969625256772926), (-62.0, 0.34267262521188646), (-59.0, -3.3148477294364294), (-56.0, -0.9644440841809467), (-53.0, -1.4559915032172799), (-50.0, -3.2200791355445113), (-47.0, -3.7953988874708044), (-44.0, 7.799418048689553), (-41.0, 1.2801888840531523), (-38.0, 3.19418608124801), (-35.0, 4.119867308670708), (-32.0, 2.847163413828114), (-29.0, 3.4342297691687125), (-26.0, -0.14288740547481826), (-23.0, 2.789553774192502)],
            'nexus':
            [(-95.0, -7.588473017629795), (-92.0, -7.876997149866101), (-89.0, 1.192762329429918), (-86.0, 1.4726683640794338), (-83.0, -1.5084532180439334), (-80.0, -3.3604196283389434), (-77.0, -2.5763433828618614), (-74.0, -2.405101577549579), (-71.0, -2.8815204522284437), (-68.0, -1.9720598575908486), (-65.0, -3.632386359625647), (-62.0, 1.3371051319746396), (-59.0, -1.2207272863983676), (-56.0, 3.177681740637472), (-53.0, -0.4342385654168939), (-50.0, -3.26738775336728), (-47.0, 6.361639274860969), (-44.0, 7.639930984253997), (-41.0, -6.0701785135035635), (-38.0, 4.778900144664069), (-35.0, 0.3856527904317948), (-32.0, 2.5299922812760602), (-29.0, 2.0944858934288413), (-26.0, 3.607403552954609), (-23.0, 0.5346397357781538)],
        },
        'iconia.da_1': {'iconia': [(-95.0, -6.2), (-92.0, 6.7), (-89.0, 3.1), (-86.0, -4.1), (-83.0, -3.2), (-80.0, -3.5), (-77.0, -4.3), (-74.0, -5.7), (-71.0, -4.3), (-68.0, -5.5), (-65.0, -5.7), (-62.0, -7.2), (-59.0, -4.4), (-56.0, -4.8), (-53.0, -4.0), (-50.0, -6.6), (-47.0, -3.3), (-44.0, -2.1), (-41.0, -2.5), (-38.0, -2.0), (-35.0, 7.8), (-32.0, 7.1), (-29.0, -0.3), (-26.0, 4.8), (-23.0, -6.6)]},
        'all_33': {'nexus': [(-95.0, -7.747405712954486), (-92.0, 0.7879084115535535), (-89.0, 1.384257842940331), (-86.0, -0.6868423447611671), (-83.0, 0.250632029913987), (-80.0, -3.5828000936926125), (-77.0, -1.182936302415303), (-74.0, -1.6712164434707135), (-71.0, -0.6996437670397101), (-68.0, -2.61222572481969), (-65.0, -2.8840580605131394), (-62.0, 0.6064440052850679), (-59.0, 1.122592927547138), (-56.0, 3.6525671019872146), (-53.0, -0.6488967425344059), (-50.0, 6.073840311785539), (-47.0, 7.546394567569086), (-44.0, 7.248539328525121), (-41.0, 6.366992893871289), (-38.0, -6.568855844102413), (-35.0, 1.9217233943837269), (-32.0, -2.194340060602978), (-29.0, 2.587665589787904), (-26.0, 3.6322200868899004), (-23.0, -7.019096253161206)]},
    })
    # ensure every dict is a defaultdict (so missing devices yield [] instead of KeyError)
    for k, v in dav.items():
        dav[k] = defaultdict(list)
        dav[k].update(v)
    # Template path for per-AP value grids; '%s' is left unexpanded here --
    # presumably filled in per AP name by Environment (confirm).
    vi_path = AP_DATA.joinpath(optrun, 'umic_%s.dat')
    if da_mode == 'none':
        davalues = []
    elif da_mode == 'generate':
        lws = serve.LWS(LWS_INI, cfgoverlay={})
        evaluate.buildDeviceAdaptationArrays(lws, vi_path.parent)
        apid_locid2measurement = np.load(vi_path.parent.joinpath('%s_apid_locid2measurements.npy' % device))#[:1, :]
        apid_locid2estimated = np.load(vi_path.parent.joinpath('%s_apid_locid2estimates.npy' % device))#[:1, :]
        f = open('r:/temp.txt', 'w')
        # NOTE(review): this loop has no break and never terminates as written;
        # it streams successive optimisation results to r:/temp.txt and was
        # presumably stopped by hand.  The two statements after it are
        # unreachable as written -- confirm intended placement.
        while True:
            davalues, avg_delta = optimize.optimizeDeviceAdaption({device: apid_locid2measurement}, apid_locid2estimated, 100)
            f.write('%.3f %r\n' % (avg_delta, davalues))
            f.flush()
        print 'using davalues: %s' % davalues[device]
        davalues = davalues[device]
    elif da_mode == 'optrun':
        # Use stored DA values keyed by the optimisation run name.
        davalues = dav[optrun][device]
        print 'using stored (%s): %s' % (optrun, davalues)
    else:
        # Any other da_mode is used directly as the key into the stored table.
        davalues = dav[da_mode][device]
        print 'using stored (%s): %s' % (da_mode, davalues)
    env = Environment(objfile=objfile, locationfile=locfile,
        tmpdir=LWS_TMP, aps=aps, vi_path=vi_path, davalues=davalues)
    print 'setup aps in %.3f sec' % (time.time() - t)
    return env
  125. def flatten_errors(errors):
  126. ''' return list of 2d errors'''
  127. errors2d = defaultdict(list)
  128. for pid, runs in errors.items():
  129. for runid, _errors in runs.items():
  130. for pname, e in _errors.items():
  131. errors2d[pname].append(e[1])
  132. return errors2d
  133. def flatten_errors_singlepath(errors):
  134. ''' return list of 2d errors'''
  135. errors2d = defaultdict(list)
  136. for runid, errors in errors.items():
  137. for pname, e in errors.items():
  138. errors2d[pname].append(e[1])
  139. return errors2d
def loop_pf_num_particles(evaluator, pathid2runids, optrun, device):
    """Sweep the particle-filter particle count and log 2d error statistics.

    Writes one 'type:... cw:... nump:...' line per error type to
    THESIS_RESULTS/numparticles_loop_pf.txt for each (cubewidth, num_particles)
    combination.  NOTE(review): `cubewidth` is looped over but never applied to
    the evaluator -- only logged; the three outer iterations therefore measure
    the same configuration (confirm).  The file handle is never closed.
    """
    #~ evaluator.localizer.replace_below = 1e-40
    f = open(THESIS_RESULTS.joinpath('numparticles_loop_pf.txt'), 'w')
    f.write('start: %s\n' % datetime.datetime.now())
    f.write('device:%s optrun:%s\n' % (device, optrun))
    f.write('%s\n' % evaluator.localizer.params())
    f.flush()
    for cubewidth in (3,2,1):
        for num_particles in range(100, 10000, 1000):
            evaluator.localizer.num_particles = num_particles
            errors, failures = evaluator.evalAll(pathid2runids)
            errors2d = flatten_errors(errors)
            for pname in ('end', 'seq', 'seq_avg'):
                _errors = errors2d[pname]
                avgerr = sum(_errors) / float(len(_errors))
                # Python 2 integer division picks the upper median for even lengths.
                medianerr = list(sorted(_errors))[len(_errors)/2]
                ff = ','.join('%s/%s' % e for e in failures)
                f.write('type:%s cw:%s nump:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (pname, cubewidth, num_particles, avgerr, medianerr, ff))
                f.flush()
  159. def eval_results_num_particles():
  160. ll = THESIS_RESULTS.joinpath('numparticles_loop_pf.txt').lines()[3:-1]
  161. data = [dict(e.split(':') for e in l.split()) for l in ll]
  162. xs = []
  163. ys = []
  164. for d in data:
  165. if not d['type'] == 'seq_avg':
  166. continue
  167. xs.append(int(d['nump']))
  168. ys.append((float(d['e2d_mean']), float(d['e2d_median'])))
  169. lab.clf()
  170. lab.plot(xs, ys)
  171. lab.ylim((0, 4))
  172. #~ lab.xlim((0, 1))
  173. lab.grid()
  174. lab.show()
def loop_pf_sigmas(evaluator, pathid2runids, optrun, device):
    """Grid-search the particle filter's emission/transition sigmas.

    For every (emission sigma, transition sigma) pair in [2, 10) x [2, 10)
    with step 0.5, runs a full evaluation and appends per-error-type mean and
    median 2d errors to THESIS_RESULTS/sigma_loop_pf.txt.  The z-sigma of the
    transition is fixed at half the x/y sigma.  The file handle is never closed.
    """
    xs = []
    ys = []
    #~ evaluator.localizer.replace_below = 1e-40
    f = open(THESIS_RESULTS.joinpath('sigma_loop_pf.txt'), 'w')
    f.write('start: %s\n' % datetime.datetime.now())
    f.write('device:%s optrun:%s\n' % (device, optrun))
    f.write('%s\n' % evaluator.localizer.params())
    f.flush()
    for esig in np.arange(2, 10, 0.5):
        for tsig in np.arange(2, 10, 0.5):
            evaluator.localizer.transition_sigmas = (tsig, tsig, tsig / 2.0)
            evaluator.localizer.emission_sigma = esig
            errors, failures = evaluator.evalAll(pathid2runids)
            errors2d = flatten_errors(errors)
            for pname in ('end', 'seq', 'seq_avg'):
                _errors = errors2d[pname]
                avgerr = sum(_errors) / float(len(_errors))
                # Python 2 integer division picks the upper median for even lengths.
                medianerr = list(sorted(_errors))[len(_errors)/2]
                ff = ','.join('%s/%s' % e for e in failures)
                f.write('type:%s esig:%s tsig:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (pname, esig, tsig, avgerr, medianerr, ff))
                f.flush()
    #~ values.append(avgerr)
    #~ ys.append(values)
    #~ lab.clf()
    #~ lab.plot(xs, ys)
    #~ lab.legend(pnames)
    #~ lab.ylim((0, 4))
    #~ lab.xlim((0, 1))
    #~ lab.grid()
    #~ lab.show()
    #~ if isinstance(x, int):
    #~ lab.savefig('r:/%s_%03d.png' % (fname, x))
    #~ else:
    #~ lab.savefig('r:/%s_%s.png' % (fname, x))
    #~ return
def latex_table_path_detail(errors, combine_forward_back=False, header=''):
    """Print a LaTeX tabularx of per-path mean errors for each algo/error type.

    errors -- pathid -> (algo, errtype) -> mean error (Python 2: relies on
              dict.values()[0] / list .keys()/.sort()).
    combine_forward_back -- if True, skip '*_r' rows and instead average each
              path with its reversed counterpart ('<pathid>_r') in one row.
    header -- optional extra LaTeX row printed above the column headers.
    Output is written to stdout; trailing rows give mean and stdev per column.
    """
    algo_errtype = errors.values()[0].keys()
    algo_errtype.sort()
    #~ print algo_errtype
    algo_errs = ['%s/%s' % e for e in algo_errtype]
    # Human-readable column headers per algorithm/error-type pair.
    headers = {'hmm/seq': 'HMM', 'hmm/seq_avg': 'HMM/avg', 'hmm/end': 'HMM/off',
        'pf/seq': 'PF', 'pf/end': 'PF/off', 'pf/seq_avg': 'PF/avg',
        'lmse/seq': 'LMSE'}
    algo_errs = [headers[e] for e in algo_errs]
    noisedata = errors
    total_means = defaultdict(list)
    rows = []
    for pathid, l in sorted(noisedata.items()):
        if combine_forward_back and pathid.endswith('_r'):
            continue
        cells = ['{\small %s}' % pathid.replace('_', '-')]
        for (algo, errtype) in algo_errtype:
            mean = l[(algo, errtype)]
            total_means[(algo, errtype)].append(mean)
            if combine_forward_back:
                # Fold the reversed walk of the same path into this row.
                other_mean = noisedata[pathid + '_r'][(algo, errtype)]
                total_means[(algo, errtype)].append(other_mean)
                cells.append('%.2f' % ((mean + other_mean) / 2))
            else:
                cells.append('%.2f' % mean)
        rows.append(' & '.join(cells) + '\\\\ \\hline')
    print r'\begin{center}'
    print r' \begin{tabularx}{1.008\textwidth}{|X|c|c|c|c|c|c|c|}\hline'
    print r' \rowcolor[gray]{.85}'
    if header:
        print header
    print r' \rowcolor[gray]{.92}'
    print r' \textbf{Path} & ' + ' & '.join(r'{\small %s}' % e.replace('_', '-') for e in algo_errs) + '\\\\ \\hline\\hline'
    for row in rows:
        print ' ' + row
    # Summary statistics over all rows (means include both directions when combined).
    mm = []
    vv = []
    for (algo, errtype) in algo_errtype:
        means = total_means[(algo, errtype)]
        mm.append(sum(means) / float(len(means)))
        vv.append(scipy.std(means))
    print r'\hline Mean in $m$ &', ' & '.join('%.2f' % e for e in mm), r'\\ \hline'
    print 'Stdev in $m$ &', ' & '.join('%.2f' % e for e in vv), r'\\ \hline'
    print r' \end{tabularx}'
    print r'\end{center}'
def eval_results_sigmas():
    """Render the sigma grid-search results as two filled-contour plots.

    Reads THESIS_RESULTS/sigma_loop_pf.txt (skipping three header lines and the
    last line), interpolates the scattered (tsig, esig, mean-error) samples onto
    a 128x128 grid, and saves side-by-side offline/online error contours to
    THESIS_FIGURES/evaluator_brute_force.png.
    """
    ll = path(THESIS_RESULTS.joinpath('sigma_loop_pf.txt')).lines()[3:-1]
    destfile = THESIS_FIGURES.joinpath('evaluator_brute_force.png')
    data = [dict(e.split(':') for e in l.split()) for l in ll]
    #~ for d in data:
    #~ print d['e2d_mean'], d['e2d_median']
    aspectRatio = 0.5
    dpi = 90
    width = 1000
    figsize = width / float(dpi), width / float(dpi) * aspectRatio
    fig = plt.figure(figsize=figsize)
    csets = []
    axs = []
    contour_levels = np.linspace(1.8, 3.3, 30)
    for errtype, plotidx, title in [('end', 121, 'Particle Filter offline error (LE)'), ('seq_avg', 122, 'Particle Filter online error (LE)')]:
        ax = fig.add_subplot(plotidx) # projection='3d'
        ax.set_title('%s' % title, y=1.15, fontsize=15)
        axs.append(ax)
        xi = np.linspace(2,10,128)
        yi = np.linspace(2,10,128)
        xs = []
        ys = []
        zs = []
        for d in data:
            if not d['type'] == errtype:
                continue
            #~ print float(d['tsig']), float(d['esig'])
            # Drop the noisy low-sigma corner before interpolating.
            if float(d['tsig']) < 3.0 or float(d['esig']) < 3:
                continue
            xs.append(float(d['tsig']))
            ys.append(float(d['esig']))
            zs.append(float(d['e2d_mean']))
        # Cubic interpolation of the scattered samples onto the regular grid.
        zi = griddata((xs, ys), zs, (xi[None,:], yi[:,None]), method='cubic')
        xim, yim = np.meshgrid(xi, yi)
        #~ plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
        #~ print len(zi), len(xi)
        #~ ax.scatter(xs, ys, zs, label='errtype: %s' % errtype)
        #~ ax.plot_wireframe(xim, yim, zi, label='xx')
        #~ ax.plot_surface(xim, yim, zi)
        cset = ax.contourf(xim, yim, zi, 30) #contour_levels
        csets.append(cset)
        # Add the contour line levels to the colorbar
        #~ cbar.add_lines(cset)
        ax.set_xlim((min(xs), max(xs)))
        ax.set_ylim((min(ys), max(ys)))
        ax.set_xlabel('$\sigma$ for Transition Probabilities')
        ax.set_ylabel('$\sigma$ for Emission Probabilities')
        #~ ax.set_zlabel('error')
    fig.subplots_adjust(bottom=0.08, left=0.04, right=0.99, top=0.91)
    # Attach a horizontal colorbar above each contour plot.
    for ax, cset in zip(axs, csets):
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("top", size="5%", pad=0.3)
        ticks=np.arange(1.7, 3.5, 0.05)
        cbar = lab.colorbar(cset, cax, ticks=ticks, orientation='horizontal')
        #~ cbar.ax.set_ylabel('error')
    #~ plt.show()
    fig.savefig(destfile, format='png', dpi=dpi)
    #~ for l in ll:
    #~ if l.startswith('type:')
#~ eval_results()
# Plot colors per (algorithm, error-type) pair: an algorithm's 'end' (offline)
# and 'seq' (online) curves share a hue, '*_avg' gets a lighter shade.
COLORS = {
    ('hmm', 'end'): 'blue',
    ('hmm', 'seq'): 'blue',
    ('hmm', 'seq_avg'): '#88BBFF',
    ('pf', 'end'): 'red',
    ('pf', 'seq'): 'red',
    ('pf', 'seq_avg'): 'orange',
    ('lmse', 'seq'): '#33CC33',
}
def plot_results_synthetic():
    """Plot the synthetic-noise evaluation and print per-path LaTeX tables.

    Loads 'synthetic_eval_<cubewidth>_<algo>_22.txt' logs for each algorithm,
    draws online (left) and offline (right) localization error versus the
    injected noise sigma -- with per-path standard deviation bars below each
    plot -- saves the figure, and finally emits LaTeX detail tables via
    latex_table_path_detail().
    """
    cube_width = 2
    # Local overrides shadow the module-level result paths for this plot.
    THESIS_RESULTS = path('t:')
    destfile = THESIS_FIGURES.joinpath(r'evaluator_synthetic_%s.png' % cube_width)
    if socket.gethostname() == 'cere-kombinat':
        # NOTE(review): the second pair of assignments immediately overrides
        # the N:\ paths -- leftover manual toggling, confirm which is wanted.
        destfile = path(r'N:\thesis_results').joinpath(r'evaluator_synthetic_%s.png' % cube_width)
        THESIS_RESULTS = path(r'N:\thesis_results')
        destfile = path(r't:').joinpath(r'evaluator_synthetic_%s.png' % cube_width)
        THESIS_RESULTS = path(r't:')
    # Algorithms and the error types each one provides.
    algos = [
        ('pf', ('end', 'seq', 'seq_avg')),
        ('hmm', ('end', 'seq', 'seq_avg')),
        ('lmse', ('seq', ))
    ]
    def _load(algo):
        # Parse one algorithm's log into a list of {key: value} records.
        ll = THESIS_RESULTS.joinpath('synthetic_eval_%s_%s_22.txt' % (cube_width, algo)).lines()[3:-1]
        return [dict(e.split(':') for e in l.split()) for l in ll if l.strip()]
    aspectRatio = 0.55
    dpi = 110
    width = 1000
    figsize = width / float(dpi), width / float(dpi) * aspectRatio
    fig = plt.figure(figsize=figsize)
    errsubtype = 'e2d_mean'
    # Which (algo, errtype) curves belong on which subplot: 121 online, 122 offline.
    plotidx2algo = {121: {('hmm', 'seq'), ('hmm', 'seq_avg'), ('pf', 'seq'), ('pf', 'seq_avg'), ('lmse', 'seq')},
        122: {('hmm', 'end'), ('pf', 'end')}}
    labels = {'hmm/seq': 'HMM', 'hmm/seq_avg': 'HMM/Avg', 'hmm/end': 'HMM', 'pf/seq': 'PF', 'pf/end': 'PF', 'pf/seq_avg': 'PF/Avg', 'lmse/seq': 'LMSE'}
    plotidx2axs = {}
    plotidx2plotvalues = defaultdict(list)
    # noise sigma -> pathid -> (algo, errtype) -> mean error; feeds the LaTeX tables.
    noise2path2errtype2mean = defaultdict(lambda: defaultdict(dict))
    for plotidx, title in [( 121, '2D Online Error (LE)'), (122, '2D Offline Error (LE)')]:
        ax = fig.add_subplot(plotidx) # projection='3d'
        plotidx2axs[plotidx] = ax
        ax.set_title('%s' % title, y=1.02, fontsize=15)
        min_x = 10**99
        max_x = -10**99
        ys_bypath = defaultdict(list)
        xs_bypath = defaultdict(list)
        for algo, errtypes in algos:
            data = _load(algo)
            for errtype in errtypes:
                if not (algo, errtype) in plotidx2algo[plotidx]:
                    continue
                xs = []
                ys = []
                for d in data:
                    if d['type'] != errtype:
                        continue
                    if 'pathid' in d:
                        # Per-path record: collected for the stdev bars below.
                        ys_bypath[(algo, errtype, errsubtype, d['pathid'])].append(float(d[errsubtype]))
                        xs_bypath[(algo, errtype, errsubtype, d['pathid'])].append(float(d['sigma']))
                    else:
                        # Aggregate record: one point on the main curve.
                        x = float(d['sigma'])
                        xs.append(x)
                        min_x = min(min_x, x)
                        max_x = max(max_x, x)
                        y = float(d[errsubtype])
                        ys.append(y)
                ax.plot(xs, ys, label=labels['%s/%s' % (algo, errtype)], color=COLORS[(algo, errtype)], linewidth=2.0)
                plotidx2plotvalues[plotidx].append((xs, ys))
        #~ print xs_bypath, ys_bypath
        legend = ax.legend(loc='upper left')
        plt.setp(legend.get_texts(), fontsize='medium')
        #~ ltext.fontsize='small'
        ax.grid()
        ax.set_xlim((min_x, max_x))
        ax.set_ylim((0, 4))
        ax.set_ylabel('Localization Error in $m$')
        plt.setp(ax.get_xticklabels(), visible=False)
        # Secondary axis under the main plot: per-path standard deviation bars.
        divider = make_axes_locatable(ax)
        ax_path_variance = divider.append_axes("bottom", 1.2, pad=0.1, sharex=ax)
        ax_path_variance.set_xlim((min_x, max_x))
        ax_path_variance.set_ylim((0, 1))
        # Hide the topmost tick label so it does not collide with the main plot.
        ax_path_variance.yaxis.set_major_formatter(FuncFormatter(lambda x, pos=None: '' if x >= 1 else '%.1f' % x))
        ax_path_variance.grid()
        ax_path_variance.set_ylabel('Stdev in $m$')
        ax_path_variance.set_xlabel('Noise $\sigma$')
        #~ NOISE_LEVEL = [0, 3, 6, 9, 12, 15]
        move_right = 0
        for algo, errtypes in algos:
            for errtype in errtypes:
                if not (algo, errtype) in plotidx2algo[plotidx]:
                    continue
                means = defaultdict(list)
                for (_algo, _errtype, _errsubtype, pathid), sigmas in xs_bypath.items():
                    for i, sigma in enumerate(sigmas):
                        if (_algo, _errtype, _errsubtype) == (algo, errtype, errsubtype):
                            #~ means[sigma].append(ys_bypath[(algo, errtype, errsubtype, pathid)][i])
                            path_mean = ys_bypath[(algo, errtype, errsubtype, pathid)][i]
                            means[int(sigma)].append(path_mean)
                            noise2path2errtype2mean[sigma][pathid][(algo, errtype)] = path_mean
                # Stdev of per-path means at each noise level.
                path_variance = {}
                for sigma, ms in means.items():
                    path_variance[sigma] = scipy.std(means[sigma])
                    #~ mu = sum(means[sigma]) / float(len(means[sigma]))
                    #~ path_variance[sigma] = (sum((m-mu)**2 for m in means[sigma]) / float(len(means[sigma])))**0.5
                # Narrower bars on the (busier) online subplot.
                if plotidx == 121:
                    ww = 0.21
                else:
                    ww = 0.4
                # Offset each series sideways so bars do not overlap.
                xs = [x + move_right * ww - ww/4.0 for x in path_variance.keys()]
                ax_path_variance.bar(xs, path_variance.values(), width=ww, linewidth=0.1, alpha=1.0, color=COLORS[(algo, errtype)])
                move_right += 1
        #~ ax_path_variance.bar(xs_bypath[k], ys_bypath[k], s=1.5, linewidth=0, alpha=0.8)
    fig.subplots_adjust(bottom=0.09, left=0.06, right=0.998, top=0.93)
    # Echo each subplot's curves onto the other one in grey for comparison.
    for xs, ys in plotidx2plotvalues[122]:
        plotidx2axs[121].plot(xs, ys, color='#99AA99')
    for xs, ys in plotidx2plotvalues[121]:
        plotidx2axs[122].plot(xs, ys, color='#99AA99')
    print 'saving to %s' % destfile.abspath()
    fig.savefig(destfile, format='png', dpi=dpi)
    noise = 14
    errors = noise2path2errtype2mean[noise]
    header = r' \multicolumn{8}{|>{\columncolor[gray]{.8}}c|}{\textbf{Online and Offline LEs in $m$ for Noise: $\sigma=%sdBm$ (forward$+$backward)}} \\ \hline' % noise
    #~ latex_table_path_detail(errors, True, header)
    for i in range(0, 18, 4):
        header = r' \multicolumn{8}{|>{\columncolor[gray]{.8}}c|}{\textbf{Online and Offline LEs in $m$ for Noise: $\sigma=%sdBm$ (forward$+$backward)}} \\ \hline' % i
        # NOTE(review): the table data is indexed with the fixed `noise` (14),
        # not the loop variable `i` -- every iteration prints the same table
        # under a different header; looks like a bug, confirm.
        latex_table_path_detail(noise2path2errtype2mean[noise], False, header)
#~ plot_results_synthetic()
#~ sys.exit()
def eval_synthetic():
    """Run the synthetic-noise evaluation sweep and log per-noise-level errors.

    For each noise sigma in [NOISE_LIMIT_LOWER, NOISE_LIMIT) and each enabled
    algorithm, evaluates all configured paths against synthetic traces named
    'sigma_<s>' and appends aggregate plus per-path 2d error statistics to
    'synthetic_eval_<cubewidth>_<algo>_22.txt'.  Output files are flushed but
    never closed.
    """
    TRACKED_PATH = LWS_TMP.joinpath(r'tracked_path_synthetic')
    # Shadows the module-level results dir for this run.
    THESIS_RESULTS = path('t:/')
    env = setupEnv('iconia_1', 'iconia')
    pfconfig = {'num_particles': 20000, 'do_blocking': True} # 'smooth': 1.0
    hmmconfig = {'prune_to': 15000, 'freespace_scan': -1} # 'smooth': 1.0
    errortype = 'mean'
    LIMIT = 20
    NOISE_LIMIT = 11
    NOISE_LIMIT_LOWER = 10
    NOISE_STEP = 1
    cube_width = 2
    algos = [
        #~ ('pf', ('end', 'seq', 'seq_avg')),
        ('hmm', ('end', 'seq', 'seq_avg')),
        #~ ('lmse', ('seq', )),
    ]
    # Open one output log per algorithm up front.
    algo2f = {}
    for algo, errnames in algos:
        outfile = THESIS_RESULTS.joinpath('synthetic_eval_%s_%s_22.txt' % (cube_width, algo))
        print 'storing to %s' % outfile.abspath()
        f = open(outfile, 'w')
        f.write('start: %s\n' % datetime.datetime.now())
        # NOTE(review): 'inconia_1' looks like a typo for 'iconia_1' in this log line.
        f.write('device:%s optrun:%s\n' % ('iconia', 'inconia_1'))
        f.flush()
        algo2f[algo] = f
    for i, sigma in enumerate(np.arange(NOISE_LIMIT_LOWER, NOISE_LIMIT, NOISE_STEP)):
        for algo, errnames in algos:
            f = algo2f[algo]
            print sigma
            # Synthetic traces are stored under a per-sigma pseudo device name.
            device = 'sigma_%.1f' % sigma
            # NOTE(review): `optrun` is not defined in this function -- presumably
            # a module-level global set elsewhere in the file; as shown here this
            # call would raise NameError.  Confirm.
            evaluator = Evaluator(optrun, device, env, cube_width, TRACKED_PATH,
                algo=algo, verbose=False, output_html=False,
                errortype=errortype, interpolate=False,
                hmmconfig=hmmconfig,
                pfconfig=pfconfig
                )
            if i == 0:
                f.write('%s\n' % evaluator.localizer.params())
            pathid2runids = evaluate.getCollectedPathids2runids(device, CONFIGURED_PATHS, TRACKED_PATH, None)
            errors, failures = evaluator.evalAll(pathid2runids, limit=LIMIT)
            errors2d = flatten_errors(errors)
            # Aggregate statistics over all paths.
            for errname in errnames:
                _errors = errors2d[errname]
                avgerr = sum(_errors) / float(len(_errors))
                # Python 2 integer division picks the upper median for even lengths.
                medianerr = list(sorted(_errors))[len(_errors)/2]
                ff = ','.join('%s/%s' % e for e in failures)
                f.write('type:%s sigma:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (errname, sigma, avgerr, medianerr, ff))
            # Per-path statistics (lines carry an extra 'pathid:' field).
            for pid, p_errors in errors.items():
                errors2d = flatten_errors_singlepath(p_errors)
                for errname in errnames:
                    _errors = errors2d[errname]
                    avgerr = sum(_errors) / float(len(_errors))
                    medianerr = list(sorted(_errors))[len(_errors)/2]
                    ff = ','.join('%s/%s' % e for e in failures)
                    f.write('pathid:%s type:%s sigma:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (pid, errname, sigma, avgerr, medianerr, ff))
            f.write('\n')
            f.flush()
        print '---'
  503. def eval_results_real(scenes, auto_plot=False):
  504. THESIS_RESULTS = path('t:/')
  505. TRACKED_PATH = path(r'D:\loco-dev\dirk\thesis\results\tracked_path')
  506. logging.basicConfig(level=logging.DEBUG)
  507. finished_scenes = OrderedDict()
  508. for scene_name, scene in scenes.items():
  509. if not scene.get('on', True):
  510. continue
  511. hmmconfig = scene.get('hmmconfig', {})
  512. pfconfig = scene.get('pfconfig', {})
  513. interpolate = scene.get('interpolate', False)
  514. errortype = 'mean'
  515. optrun = scene['optrun']
  516. device = scene['device']
  517. algo = scene['algo']
  518. cube_width = scene['cubewidth']
  519. damode = scene.get('damode', 'none')
  520. tracked_path = scene.get('tracked', TRACKED_PATH)
  521. limit = scene.get('limit', None)
  522. env = setupEnv(optrun, device, damode)
  523. evaluator = Evaluator(optrun, device, env, cube_width, tracked_path,
  524. algo=algo, verbose=False, output_html=False,
  525. errortype=errortype, interpolate=interpolate,
  526. hmmconfig=hmmconfig,
  527. pfconfig=pfconfig
  528. )
  529. pathid2runids = evaluate.getCollectedPathids2runids(device, CONFIGURED_PATHS, tracked_path, None)
  530. for pid in pathid2runids.keys():
  531. if pid.startswith('still_'):
  532. pathid2runids.pop(pid)
  533. errors, failures = evaluator.evalAll(pathid2runids, limit=limit)
  534. f = open(THESIS_RESULTS.joinpath('%s.errors' % scene_name), 'wb')
  535. for k, v in errors.items():
  536. errors[k] = dict(v)
  537. for k2, v2 in errors[k].items():
  538. errors[k][k2] = dict(v2)
  539. dumpable = dict(errors)
  540. pickle.dump(dumpable, f)
  541. f.close()
  542. finished_scenes[scene_name] = scene
  543. if auto_plot:
  544. plot_results_real(finished_scenes)
  545. def plot_results_real(scenes):
  546. THESIS_RESULTS = path('t:/')
  547. SCENE_COUNT2TEXT_OFFSET = defaultdict(float)
  548. SCENE_COUNT2TEXT_OFFSET.update({1:1, 2: 1.5, 3: 1.7, 4: 1.8, 18: 1.5})
  549. SCENE_COUNT2TEXT_SIZE = defaultdict(lambda: 20)
  550. SCENE_COUNT2TEXT_SIZE.update({3: 15, 4: 15, 18: 14})
  551. SCENE_COUNT2LABEL_OFFSET = defaultdict(lambda: 0.23)
  552. SCENE_COUNT2LABEL_OFFSET.update({4: 0.34})
  553. path_errors = defaultdict(dict)
  554. scene_errors = defaultdict(lambda: defaultdict(list))
  555. for scene_name, scene in scenes.items():
  556. f = open(THESIS_RESULTS.joinpath('%s.errors' % scene_name), 'rb')
  557. errors = pickle.load(f)
  558. for pid, _errors in errors.items():
  559. #~ print pid, _errors.keys()
  560. for errtype, es in flatten_errors_singlepath(_errors).items():
  561. if 'filter_err' in scene and scene['filter_err'] != errtype:
  562. continue
  563. scene_errors[scene_name][errtype].extend(es)
  564. path_errors[pid][(scene['algo'], errtype)] = scipy.mean(es)
  565. aspectRatio = 0.30
  566. dpi = 90
  567. width = 1000
  568. figsize = width / float(dpi), width / float(dpi) * aspectRatio
  569. fig = plt.figure(figsize=figsize)
  570. ax = fig.add_subplot(111) # projection='3d'
  571. xs = []
  572. ys = []
  573. colors = []
  574. err_std = []
  575. x = 0
  576. x_labels = defaultdict(str)
  577. scenetexts = []
  578. for scene_name, scene in scenes.items():
  579. errors = scene_errors[scene_name]
  580. scenetexts.append((x, scene_name))
  581. for errtype, es in errors.items():
  582. print scene_name, errtype, scipy.mean(es)
  583. xs.append(x-0.4)
  584. ys.append(scipy.mean(es))
  585. err_std.append(scipy.std(es))
  586. colors.append(COLORS[(scene['algo'], errtype)])
  587. x_labels[x] = '%s/%s' % (scene['algo'], errtype)
  588. x += 1
  589. if 'skip' in scene:
  590. x += scene['skip']
  591. elif len(scenes) < 4:
  592. x += 1
  593. else:
  594. x += 0.4
  595. ylimit = (max(ys) + max(err_std))*1.1
  596. for x, scene_name in scenetexts:
  597. if scenes[scene_name].get('use_title'):
  598. title = scenes[scene_name].get('title', scene_name)
  599. else:
  600. title = scene_name
  601. if len(scene_errors[scene_name]) == 3 or len(scenetexts) == 18:
  602. o = SCENE_COUNT2TEXT_OFFSET[len(scenetexts)]
  603. if len(scenetexts) == 18:
  604. if len(title) > 11:
  605. o += 0.55
  606. elif len(title) < 6:
  607. o -= 0.2
  608. else:
  609. o = 0.8
  610. if scenes[scene_name].get('no_title'):
  611. continue
  612. plt.text(x + o,
  613. ylimit, title, size=SCENE_COUNT2TEXT_SIZE[len(scenetexts)],
  614. ha="right", va="top",
  615. bbox = dict(boxstyle="square",
  616. ec=(1., 0.8, 0.8),
  617. fc=(1., 0.98, 0.98),
  618. )
  619. )
  620. fig.subplots_adjust(bottom=0.08, left=0.06, right=0.99, top=0.91)
  621. if len(scenes) < 10:
  622. labels = {'hmm/seq': 'HMM/on', 'hmm/seq_avg': 'HMM/avg', 'hmm/end': 'HMM/off',
  623. 'pf/seq': 'PF/on', 'pf/end': 'PF/off', 'pf/seq_avg': 'PF/avg',
  624. 'lmse/seq': 'LMSE', '':''}
  625. else:
  626. labels = {'hmm/seq': 'HMM', 'hmm/seq_avg': 'HMM', 'hmm/end': 'HMM',
  627. 'pf/seq': 'PF', 'pf/end': 'PF', 'pf/seq_avg': 'PF/avg',
  628. 'lmse/seq': 'LMSE', '':''}
  629. ax.xaxis.set_major_locator(FixedLocator(x_labels.keys()))
  630. ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos=None: labels[x_labels[x]]))
  631. if len(scenes) > 10:
  632. for tick in ax.xaxis.get_major_ticks():
  633. tick.label1.set_fontsize(12)
  634. rects = ax.bar(xs, ys, width=0.8, color='#DDDDDD', yerr=err_std,
  635. error_kw=dict(elinewidth=6, ecolor='grey'))
  636. if len(scenes) < 4:
  637. ax.set_xlim(-1, max(xs) + 1.3)
  638. else:
  639. ax.set_xlim(-0.6, max(xs) + 1)
  640. ax.set_ylim(0, ylimit)
  641. ax.set_ylabel('Localization Error in $m$')
  642. def autolabel(rects):
  643. # attach some text labels
  644. for rect in rects:
  645. height = rect.get_height()
  646. o = SCENE_COUNT2LABEL_OFFSET[len(scenetexts)]
  647. if len(scenes) > 10:
  648. o += 0.07
  649. plt.text(rect.get_x() + rect.get_width() / 2.0 + o,
  650. height*1.02,
  651. ('%.2f' % height) if len(scenes) < 10 else ('%.1f' % height),
  652. ha='center', va='bottom', fontsize= 11 if len(scenetexts) > 3 else 12)
  653. autolabel(rects)
  654. ax.grid(axis='y')
  655. namebase = '_'.join(scenes.keys())
  656. destfile = THESIS_FIGURES.joinpath(r't:/%s.png' % namebase)
  657. print 'saving to %s' % destfile.abspath()
  658. fig.savefig(destfile, format='png', dpi=dpi)
  659. #~ D:\loco-dev\dirk\thesis\figures
  660. #~ fig.savefig(destfile, format='png', dpi=dpi)
  661. latex_table_path_detail(path_errors, True)
  662. def parse_scenes(s, scenes):
  663. res = OrderedDict()
  664. for scene_name in s.split():
  665. if scene_name.startswith('#'):
  666. on = False
  667. scene_name = scene_name[1:]
  668. else:
  669. on = True
  670. if scene_name in scenes:
  671. res[scene_name] = scenes[scene_name]
  672. res[scene_name]['on'] = on
  673. else:
  674. print '"%s"' % scene_name
  675. UNKNOWN_SCENE
  676. return res
  677. def define_scenarios():
  678. scenes = OrderedDict()
  679. scenes['synth_old'] = {
  680. 'optrun': 'iconia_1',
  681. 'device': 'sigma_0.0',
  682. 'algo': 'hmm',
  683. 'tracked': LWS_TMP.joinpath(r'tracked_path_synthetic_old')
  684. }
  685. scenes['synth_new'] = d = dict(scenes['synth_old'])
  686. d['tracked'] = LWS_TMP.joinpath(r'tracked_path_synthetic')
  687. scenes['synth_interp_old'] = d = dict(scenes['synth_old'])
  688. d['interpolate'] = True
  689. pfconfig = {'num_particles': 15000,
  690. 'do_blocking': False,
  691. 'transition_sigmas': (4.0, 4.0, 2.0),
  692. #~ 'max_replace': 5000,
  693. #~ 'replace_below': 1e-10,
  694. #~ 'smooth': 1.0,
  695. #~ 'turnoff_blocking':1e1,
  696. }
  697. hmmconfig = {'prune_to': 8000, 'freespace_scan': -1}
  698. basic = { 'optrun': 'iconia_1',
  699. 'device': 'iconia',
  700. 'pfconfig': pfconfig,
  701. 'hmmconfig': hmmconfig,
  702. 'cubewidth' : 2,
  703. 'damode': 'none'
  704. }
  705. nexus = { 'optrun': 'all_33',
  706. 'device': 'nexus',
  707. 'pfconfig': pfconfig,
  708. 'hmmconfig': hmmconfig,
  709. 'cubewidth' : 2,
  710. 'interpolate': True,
  711. 'damode': 'none'
  712. }
  713. scenes['hmm_basic'] = hmm_basic = dict(basic)
  714. hmm_basic['algo'] = 'hmm'
  715. #~ hmm_basic['cubewidth'] = 3
  716. scenes['hmm_basic_ip'] = hmm_basic_ip = dict(hmm_basic)
  717. hmm_basic_ip['interpolate'] = True
  718. hmm_basic_ip['title'] = 'Iconia/HMM'
  719. scenes['hmm_basic_da'] = hmm_basic_da = dict(hmm_basic)
  720. hmm_basic_da['damode'] = 'generate'
  721. hmm_basic_da['title'] = 'Iconia/HMM'
  722. scenes['hmm_basic_ip_da'] = hmm_basic_ip_da = dict(hmm_basic_ip)
  723. hmm_basic_ip_da['damode'] = 'optrun'
  724. scenes['pf_basic'] = pf_basic = dict(basic)
  725. pf_basic['algo'] = 'pf'
  726. scenes['pf_basic_ip'] = pf_basic_ip = dict(pf_basic)
  727. pf_basic_ip['interpolate'] = False
  728. pf_basic_ip['title'] = 'Iconia/PF'
  729. scenes['pf_basic_da'] = pf_basic_da = dict(pf_basic)
  730. pf_basic_da['damode'] = 'generate'
  731. scenes['pf_basic_ip_da'] = pf_basic_ip_da = dict(pf_basic_ip)
  732. pf_basic_ip_da['damode'] = 'optrun'
  733. scenes['lmse_basic'] = lmse_basic = dict(basic)
  734. lmse_basic['algo'] = 'lmse'
  735. lmse_basic['limit'] = 5
  736. scenes['lmse_basic_ip'] = lmse_basic_ip = dict(lmse_basic)
  737. lmse_basic_ip['interpolate'] = True
  738. lmse_basic_ip['title'] = 'LMSE'
  739. scenes['lmse_basic_ip_da'] = lmse_basic_ip_da = dict(lmse_basic_ip)
  740. lmse_basic_ip_da['damode'] = 'optrun'
  741. scenes['pf_nexus_all'] = pf_nexus_all = dict(nexus)
  742. pf_nexus_all['algo'] = 'pf'
  743. pf_nexus_all['title'] = 'Nexus/PF'
  744. scenes['hmm_nexus_all'] = hmm_nexus_all = dict(nexus)
  745. hmm_nexus_all['algo'] = 'hmm'
  746. hmm_nexus_all['title'] = 'Nexus/HMM'
  747. scenes['pf_nexus_basic'] = pf_nexus_basic = dict(pf_nexus_all)
  748. pf_nexus_basic['optrun'] = 'iconia_1'
  749. scenes['hmm_nexus_basic'] = hmm_nexus_basic = dict(hmm_nexus_all)
  750. hmm_nexus_basic['obtrun'] = 'iconia_1'
  751. for t in 'pf_basic_ip', 'hmm_basic_ip':
  752. scenes[t.split('_')[0] + '_iconia_all'] = d = dict(scenes[t])
  753. d['optrun'] = 'all_33'
  754. d['damode'] = 'none'
  755. #~ d['title'] = d['title']
  756. #~ lmse_basic_ip_da['damode'] = 'optrun'
  757. for t in 'lmse_basic_ip_da', 'pf_basic_ip_da', 'hmm_basic_ip_da':
  758. scenes[t + '_hr'] = d = dict(scenes[t])
  759. d['optrun'] = 'iconia.da_1'
  760. d['damode'] = 'optrun'
  761. s = '#hmm_basic #hmm_basic_ip_da #hmm_basic_ip #hmm_basic_da'
  762. s = '#lmse_basic_ip_da hmm_basic_ip_da #pf_basic_ip_da'
  763. s = '#lmse_basic_ip #hmm_basic_ip #pf_basic_ip'
  764. #~ s = 'pf_basic_ip_da_hr'# lmse_basic_ip_da_hr hmm_basic_ip_da_hr
  765. s = ' hmm_nexus_all pf_nexus_all hmm_iconia_all pf_iconia_all '
  766. #~ s = '#hmm_nexus_basic #pf_nexus_basic #hmm_basic_ip #pf_basic_ip'
  767. #~ s = '#lmse_basic_ip #hmm_basic_ip #pf_basic_ip'
  768. #~ s = 'pf_nexus_basic'
  769. coarse_models =[
  770. ('full2', 'iconia.all2mat_2', 'Full2'),
  771. ('basic1', 'iconia.basic1mat_1', 'Basic1'),
  772. ('full1', 'iconia.all1mat_1', 'Full1'),
  773. ('basic2', 'iconia.basic2mat_2', 'Basic2'),
  774. ('basicdoors5', 'iconia.basic5mat_1', 'Basic+Doors5'),
  775. ('full11', 'iconia_1', 'Full11')]
  776. for name, optrun, title in coarse_models:
  777. scenes[name + '_lmse'] = d = dict(lmse_basic_ip)
  778. d['optrun'] = optrun
  779. d['title'] = title
  780. d['damode'] = 'none'
  781. d['skip'] = -0.1
  782. scenes[name + '_pf'] = d = dict(pf_basic_ip)
  783. d['optrun'] = optrun
  784. d['damode'] = 'none'
  785. d['filter_err'] = 'end'
  786. d['title'] = ''
  787. d['skip'] = -0.1
  788. scenes[name + '_hmm'] = d = dict(hmm_basic_ip)
  789. d['optrun'] = optrun
  790. d['damode'] = 'none'
  791. d['filter_err'] = 'end'
  792. d['title'] = ''
  793. d['skip'] = 0.2
  794. #
  795. #~ s = '''
  796. #~ #basic1_lmse #basic1_pf #basic1_hmm
  797. #~ #full1_lmse #full1_pf #full1_hmm
  798. #~ #basic2_lmse #basic2_pf #basic2_hmm
  799. #~ #full2_lmse #full2_pf #full2_hmm
  800. #~ #basicdoors5_lmse #basicdoors5_pf #basicdoors5_hmm
  801. #~ #full11_lmse #full11_pf #full11_hmm
  802. #~ '''
  803. res = parse_scenes(s, scenes)
  804. for scene in res.values():
  805. scene['limit'] = 2
  806. scene['use_title'] = True
  807. #~ scene['no_title'] = True
  808. #~ scene['damode'] = 'optrun'
  809. return res
# Script driver: executes at import time (before the __main__ guard below),
# so everything after sys.exit() in this file is effectively unreachable.
eval_results_real(define_scenarios(), auto_plot=True)
# Re-plots from the pickled .errors files; note this passes the FULL scene
# selection again, not the finished subset returned inside eval_results_real.
plot_results_real(define_scenarios())
sys.exit()
if __name__ == '__main__':
    # NOTE(review): this guard is unreachable -- the module-level
    # eval_results_real()/sys.exit() above terminates the script first.
    # Scratchpad block: successive re-assignments are deliberate toggles,
    # the last one wins.
    optrun = 'all_5'
    optrun = 'iconia_1'  # effective optrun
    device = 'iconia'
    TRACKED_PATH = LWS_TMP.joinpath('tracked_path_synthetic')
    device = 'sigma_00'  # presumably 'sigma_0.0' was meant -- TODO confirm
    logging.basicConfig(level=logging.DEBUG)
    np.set_printoptions(linewidth=200, threshold=1000)
    cython_annotate('utils/accelerated.pyx')
    eval_synthetic()
    sys.exit()
    # ---- everything below is dead code (after sys.exit), kept as scratch ----
    env = setupEnv(optrun, device)
    #~ import utils.accelerated as speed
    HMMLocalizer.no_signal_delta = 0
    cfgoverlay = {'scenes': {'umic': {'objfile': OBJFILE, 'locationfile': LOCFILE}},
                  'tmp_apdata': AP_DATA,
                  'tmp_optruns': r'N:\lws-instance\tmp\_optruns',
                  'tmp_tracked': TRACKED_PATH,
                  }
    pathid = 'og1_classic'
    pathid = 'og1_eg_right'
    filter_pathid = 'og1_eg_right'
    filter_pathid = 'og1_classic'
    #~ filter_pathid = 'og1'
    #~ s = lws.evaluate(optrun, device, 3, refresh=True, filter=filter_pathid)
    #~ s = lws.getLocationsAndMeasurements('iconia')
    #~ print s
    #~ lws = server.LWS(r'D:\loco-dev\dirk\lws\instance_nostromo\lws.ini', cfgoverlay=cfgoverlay)
    #~ s = lws.evaluate(device, optrun, 3, refresh=True, filter=filter_pathid)
    #~ sys.exit()
    #~ open('r:/out.html', 'w').write(s.replace('src="/static/', r'src="D:/loco-dev/dirk/lws/static/'))
    #~ s = lws.evaluatePath(optrun, device, pathid=pathid, runid="05", setting="end", cubewidth=3, refresh=True)
    #~ s = s.replace('&', '&amp;')
    #~ tree = StringToTree(s)
    #~ for e in tree.xpath('//td[@id="imgs1"]/img'):
    #~ e.attrib['src'] = r'D:\loco-dev\dirk\tmp\evaluator\%s\%s_3_end_%s_05.png' % (optrun, device, pathid)
    #~ for e in tree.xpath('//td[@id="imgs2"]/img'):
    #~ e.attrib['src'] = r'D:\loco-dev\dirk\tmp\evaluator\%s\%s_3_seq_%s_05.png' % (optrun, device, pathid)
    #~ s = TreeToString(tree)
    #~ open('r:/out_path.html', 'w').write(s.replace('src="/static/', r'src="D:/loco-dev/dirk/lws/static/'))
    #~ errortype = 'mean'
    pfconfig = {'num_particles': 10000, 'do_blocking': True, 'smooth': 1.0}
    # Ad-hoc single-run evaluation setup (dead code, see sys.exit above).
    evaluator = Evaluator(optrun, device, env, 1, TRACKED_PATH,
                          algo='lmse', verbose=False, output_html=False, pfconfig=pfconfig,
                          errortype='mean', interpolate=False)
    #~ pathid2runids = {'og1_classic': ('05', '06', '07'),
    #~ 'og1_straight': ('02', '07'),
    #~ }
    #~ pathid2runids = {'og1_classic_r': ('02',)}
    pathid2runids = {'og1_classic': ('01',)}
    #~ pathid2runids = {'og1_long_rooms': ('09',)}
    #~ pathid2runids = {'og1_eg_right': ('11',)}
    #~ pathid2runids = evaluate.getCollectedPathids2runids(device, CONFIGURED_PATHS, TRACKED_PATH, None)
    #~ print pathid2runids
    #~ pathid2runids = {'stairs_upward_r': ('01',)}
    #~ errors = evaluator.evalAll(pathid2runids, limit=5)
    #~ loop_pf_sigma(evaluator, pathid2runids, optrun, device)
    #~ eval_results_sigmas()
    #~ loop_pf_num_particles(evaluator, pathid2runids, optrun, device)
    #~ eval_results_num_particles()
    #~ errors2d = flatten_errors(errors)
    #~ for pname, errors in errors2d:
    #~ cubewidth = 1
    # Direct localizer smoke test on a single synthetic measurement file.
    localizer = HMMLocalizer(env, cubewidth=2, prune_to=10000, num_threads=4, verbose=False)
    #~ localizer.test()
    #~ cubewidth = 3
    #~ localizer = LMSELocalizer(env, cubewidth=cubewidth, num_threads=4, verbose=False)
    m = Measurements()
    m.load(r'D:\loco-dev\dirk\tmp\tracked_path_synthetic\sigma_0.0\og1_classic\measurements_00.txt')
    #~ m.load(r'R:\xx.txt')
    print localizer.evaluateMeasurements(m)[0]
    #~ for r in localizer.decoder.history[0]:
    #~ print r