| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
7037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201 |
- import datetime
- import sys
- import time
- from collections import defaultdict, OrderedDict
- import logging
- from random import randint, random
- import socket
- import cPickle as pickle
- import numpy as np
- import matplotlib.pyplot as plt
- import pylab as lab
- from scipy.interpolate import griddata
- import scipy
- from mpl_toolkits.axes_grid1 import make_axes_locatable
- from matplotlib.ticker import FuncFormatter, FixedLocator
-
- from utils import cython_annotate
- from utils.xml import TreeToString, StringToTree, FileToTree
- from utils.path import path
- from lws import serve
- from lws import optimize
- from lws import evaluate
- evaluate.STAIR_HACK = True
- from lws.evaluate import Evaluator
- from lws.localize import Environment, HMMLocalizer, LMSELocalizer, Measurements
# Host-specific path configuration: data, result and map locations differ
# per development machine; the branch is selected by hostname.
if socket.gethostname() == 'freimann':
    AP_DATA = path('/home/rothe/loco/apdata/')
    THESIS_RESULTS = path('/home/rothe/loco/thesis_results')
    THESIS_FIGURES = path('/home/rothe/loco/thesis_results')
    LWS_TMP = path('/home/rothe/loco/lws-instance')
    OBJFILE = path('/home/rothe/loco/maps/UMIC/umic.obj')
    LOCFILE = path('/home/rothe/loco/maps/UMIC/experiment/locations.txt')
    LWS_INI = r'/home/rothe/loco/lws/instance_nostromo/lws_local.ini'
elif socket.gethostname() == 'nostromo':
    # NOTE(review): this branch defines TMP but no LWS_INI -- setupEnv()
    # with da_mode='generate' would raise NameError on this host; confirm.
    AP_DATA = path('/home/dirk/lws-instance/tmp/apdata/')
    THESIS_RESULTS = path('/home/dirk/thesis_results')
    THESIS_FIGURES = path('/home/dirk/thesis_results')
    LWS_TMP = path('/home/dirk/lws-instance/tmp')
    OBJFILE = path('/home/dirk/loco/maps/UMIC/umic.obj')
    LOCFILE = path('/home/dirk/loco/maps/UMIC/experiment/locations.txt')
    TMP = LWS_TMP
else:
    # default: Windows development machine
    AP_DATA = path('D:/loco-dev/dirk/_tmp/apdata/')
    THESIS_RESULTS = path(r'D:\loco-dev\dirk\thesis\results')
    THESIS_FIGURES = path(r'D:\loco-dev\dirk\thesis\figures')
    LWS_TMP = path(r'D:\loco-dev\dirk\tmp')
    OBJFILE = path(r'D:\loco-dev\maps\UMIC\umic.obj')
    LOCFILE = path(r'D:\loco-dev\maps\UMIC\experiment\locations.txt')
    LWS_INI = r'D:\loco-dev\dirk\lws\instance_nostromo\lws_local.ini'
-
# Named walking paths mapped to the ordered list of location ids (strings)
# that make up each path.  Keys ending in '_r' are the same path walked in
# reverse order; 'still_*' entries are stationary measurements at one spot.
CONFIGURED_PATHS = {u'stairs_upward': [u'16', u'17', u'27', u'26', u'11', u'7', u'24', u'25', u'72', u'60', u'28'], u'og1_long_straight': [u'15', u'13', u'11', u'7', u'6', u'8', u'9'], u'og1_classic': [u'4', u'3', u'71', u'6', u'8', u'9', u'10', u'30'], u'og1_room_change': [u'73', u'73', u'1', u'71', u'6', u'74', u'75', u'31', u'31'], u'og1_eg': [u'9', u'8', u'71', u'7', u'11', u'26', u'27', u'17', u'16', u'35', u'39'], u'og1_long_rooms': [u'29', u'98', u'9', u'8', u'6', u'7', u'11', u'13', u'94', u'96', u'97', u'95', u'94', u'13', u'14', u'14'], u'eg_room_change': [u'76', u'100', u'22', u'23', u'102', u'84', u'84', u'102', u'23', u'101', u'86', u'86'], u'og1_eg_right': [u'4', u'3', u'71', u'7', u'11', u'26', u'27', u'17', u'18', u'23', u'99', u'81', u'80'], u'still_73': [u'73', u'73'], u'still_2': [u'2', u'2'], u'still_14': [u'14', u'14'], u'still_26': [u'26', u'26'], u'still_90': [u'90', u'90'], u'still_60': [u'60', u'60'], u'still_19': [u'19', u'19'], u'still_35': [u'35', u'35'], u'still_16': [u'16', u'16'], u'still_64': [u'64', u'64'], u'still_95': [u'95', u'95'], u'stairs_upward_r': [u'28', u'60', u'72', u'25', u'24', u'7', u'11', u'26', u'27', u'17', u'16'], u'og1_long_straight_r': [u'9', u'8', u'6', u'7', u'11', u'13', u'15'], u'og1_classic_r': [u'30', u'10', u'9', u'8', u'6', u'71', u'3', u'4'], u'og1_room_change_r': [u'31', u'31', u'75', u'74', u'6', u'71', u'1', u'73', u'73'], u'og1_eg_r': [u'39', u'35', u'16', u'17', u'27', u'26', u'11', u'7', u'71', u'8', u'9'], u'og1_long_rooms_r': [u'14', u'14', u'13', u'94', u'95', u'97', u'96', u'94', u'13', u'11', u'7', u'6', u'8', u'9', u'98', u'29'], u'eg_room_change_r': [u'86', u'86', u'101', u'23', u'102', u'84', u'84', u'102', u'23', u'22', u'100', u'76'], u'og1_eg_right_r': [u'80', u'81', u'99', u'23', u'18', u'17', u'27', u'26', u'11', u'7', u'71', u'3', u'4']}
def setupEnv(optrun, device, da_mode="none"):
    '''Build a localization Environment for one optimization run.

    :param optrun: name of the optimization run; selects the value-image
        data under AP_DATA and (optionally) stored device-adaptation values
    :param device: device name (e.g. 'iconia', 'nexus'), used to pick the
        device-adaptation values and measurement arrays
    :param da_mode: device-adaptation mode:
        'none'     -- no adaptation values
        'generate' -- (re)optimize adaptation values from measurements
        'optrun'   -- use stored values keyed by `optrun`
        otherwise  -- use stored values keyed by `da_mode` itself
    :returns: a configured Environment instance
    '''
    locfile = LOCFILE
    objfile = OBJFILE

    t = time.time()

    # access points: (name, x, y, z) positions in the UMIC building
    aps = [
        ('104', 44.8, 15.0, -1.0),
        ('106', 36.9, 7.5, 5.1),
        ('107', 35.3, 14.2, 1.0),
        ('108', 36.1, 4.4, 1.0),
        ('109p2', 51.7, 3.0, 1.0),
        ('110p2', 45.8, 15.0, 3.2),
        ('114', 36.6, 6.5, -1.5),
        ('171', 36.5, 9.55, 2.0),
        ('172', 23.4, 5.8, 2.6),
        ('edueg031', 41.6, 7.4, -1.0),
        ('eduegcorr1', 12.6, 4.9, -0.75),
        ('eduegcorr2', 31.6, 11.3, -1.0),
        ('eduegdemo', 28.7, 14.6, -2.6),
        ('eduog1108', 11.6, 6.4, 2.0),
        ('eduog1server', 48.5, 6.4, 2.0),
        ('eduog2206', 5.7, 6.35, 5.2),
        ('eduog2corr', 32.0, 11.2, 5.5),
        ('eduog2kitchen', 19.2, 9.2, 5.5),
        ('freya', 32.6, 10.5, 1.0),
        ('gaia', 45.8, 10.6, 1.0),
        ('hyperion', 39.3, 1.0, 0.1),
        ('iris', 9.8, 10.4, 0.1),
    ]

    # stored device-adaptation values: optrun -> device -> list of
    # (signal strength dBm, correction offset) pairs
    dav = defaultdict(lambda: defaultdict(list))
    dav.update({
        # wave propagation trained
        'all_5': {'iconia': [(-90.0, 0.1), (-80.0, -4.8), (-70.0, -1.1), (-60.0, -1.4), (-50.0, 4.6), (-40.0, 8.6), (-30.0, 4.8), (0.0, -0.7)]},

        # wave propagation trained
        'all_32': {'iconia': [(-95.0, 6.4), (-92.0, 1.8), (-89.0, -0.1), (-86.0, -10.3), (-83.0, -4.1), (-80.0, -7.4), (-77.0, -8.8), (-74.0, -8.0), (-71.0, -7.4), (-68.0, -7.8), (-65.0, -7.3), (-62.0, -8.2), (-59.0, -5.4), (-56.0, -6.4), (-53.0, -7.2), (-50.0, -3.8), (-47.0, -5.8), (-44.0, 1.1), (-41.0, -1.8), (-38.0, -2.7), (-35.0, 0.2), (-32.0, -4.3), (-29.0, 6.8), (-26.0, -6.9), (-23.0, -4.9)]},
        #~ retrained da 'all_32': {'iconia': [(-95.0, 5.669932196007285), (-92.0, 1.276227839123189), (-89.0, -0.3662046945563398), (-86.0, -3.910946672699387), (-83.0, -3.8030051754485314), (-80.0, -6.492840168620569), (-77.0, -7.081285243423606), (-74.0, -7.7933713944929615), (-71.0, -7.865880407116174), (-68.0, -6.970590898308797), (-65.0, -7.5431398071828095), (-62.0, -7.535536597051766), (-59.0, -7.540053462598711), (-56.0, -7.242611183340358), (-53.0, -7.412929069196467), (-50.0, -5.847772074371091), (-47.0, -7.048747794215609), (-44.0, -7.730967657968016), (-41.0, 1.194441382213425), (-38.0, -1.3817805726231445), (-35.0, 3.41487719361011), (-32.0, -6.450405842872062), (-29.0, -1.2614925411163653), (-26.0, 1.4683801704451258), (-23.0, -0.5648552206848222)]},

        # independently trained
        'iconia.highres_3': {'iconia': [(-95.0, 5.356757230730603), (-92.0, 5.989903962422883), (-89.0, 2.081951525948435), (-86.0, 0.9588198151073739), (-83.0, -1.1124684702684822), (-80.0, -2.2060971911363185), (-77.0, -1.144559292083103), (-74.0, -1.2737037746287196), (-71.0, -0.7898499867132481), (-68.0, -0.4132733920785634), (-65.0, -0.219021367593064), (-62.0, 0.22678720895050503), (-59.0, -0.4790131304298816), (-56.0, 0.2520500585382777), (-53.0, -0.36286191361626824), (-50.0, 2.6737368232446266), (-47.0, 5.298159624613441), (-44.0, 5.605423446284294), (-41.0, 3.12608476637685), (-38.0, -2.8778400578617354), (-35.0, -3.4049649437915774), (-32.0, 2.41610282901362), (-29.0, 7.067256082518609), (-26.0, 5.231612522065486), (-23.0, 3.89199737215653)]},

        'iconia_1': {'iconia':
            #~ [(-95.0, -7.302880873503025), (-92.0, -6.694758275178181), (-89.0, 1.280934033820936), (-86.0, 0.2661560308241518), (-83.0, -1.571802042634903), (-80.0, -1.4792074535005466), (-77.0, -1.225551755750758), (-74.0, -1.6632332646292913), (-71.0, -2.3226899888806933), (-68.0, -3.217047979648854), (-65.0, -2.8368001035981942), (-62.0, 0.4547005033193087), (-59.0, -3.244675166899406), (-56.0, -0.5994222971766014), (-53.0, -1.6562485454932854), (-50.0, -2.3886916113837064), (-47.0, -2.2793905050446837), (-44.0, 4.383520219936498), (-41.0, 1.957141308155586), (-38.0, -2.693326949656976), (-35.0, 2.854732592997577), (-32.0, 4.379933428367942), (-29.0, 0.9737578282863675), (-26.0, 3.346395931141383), (-23.0, 0.014800485462740487)]
            [(-95.0, -6.433512328802718), (-92.0, -7.993010242814167), (-89.0, 1.3649206075960643), (-86.0, 0.34203843661928107), (-83.0, -1.8761612239248273), (-80.0, -1.5800910197184097), (-77.0, -1.1185837850999842), (-74.0, -1.643928026263407), (-71.0, -2.226572938639837), (-68.0, -3.232398498289264), (-65.0, -2.8969625256772926), (-62.0, 0.34267262521188646), (-59.0, -3.3148477294364294), (-56.0, -0.9644440841809467), (-53.0, -1.4559915032172799), (-50.0, -3.2200791355445113), (-47.0, -3.7953988874708044), (-44.0, 7.799418048689553), (-41.0, 1.2801888840531523), (-38.0, 3.19418608124801), (-35.0, 4.119867308670708), (-32.0, 2.847163413828114), (-29.0, 3.4342297691687125), (-26.0, -0.14288740547481826), (-23.0, 2.789553774192502)],
            'nexus':
            [(-95.0, -7.588473017629795), (-92.0, -7.876997149866101), (-89.0, 1.192762329429918), (-86.0, 1.4726683640794338), (-83.0, -1.5084532180439334), (-80.0, -3.3604196283389434), (-77.0, -2.5763433828618614), (-74.0, -2.405101577549579), (-71.0, -2.8815204522284437), (-68.0, -1.9720598575908486), (-65.0, -3.632386359625647), (-62.0, 1.3371051319746396), (-59.0, -1.2207272863983676), (-56.0, 3.177681740637472), (-53.0, -0.4342385654168939), (-50.0, -3.26738775336728), (-47.0, 6.361639274860969), (-44.0, 7.639930984253997), (-41.0, -6.0701785135035635), (-38.0, 4.778900144664069), (-35.0, 0.3856527904317948), (-32.0, 2.5299922812760602), (-29.0, 2.0944858934288413), (-26.0, 3.607403552954609), (-23.0, 0.5346397357781538)],

        },
        'iconia.da_1': {'iconia': [(-95.0, -6.2), (-92.0, 6.7), (-89.0, 3.1), (-86.0, -4.1), (-83.0, -3.2), (-80.0, -3.5), (-77.0, -4.3), (-74.0, -5.7), (-71.0, -4.3), (-68.0, -5.5), (-65.0, -5.7), (-62.0, -7.2), (-59.0, -4.4), (-56.0, -4.8), (-53.0, -4.0), (-50.0, -6.6), (-47.0, -3.3), (-44.0, -2.1), (-41.0, -2.5), (-38.0, -2.0), (-35.0, 7.8), (-32.0, 7.1), (-29.0, -0.3), (-26.0, 4.8), (-23.0, -6.6)]},

        'all_33': {'nexus': [(-95.0, -7.747405712954486), (-92.0, 0.7879084115535535), (-89.0, 1.384257842940331), (-86.0, -0.6868423447611671), (-83.0, 0.250632029913987), (-80.0, -3.5828000936926125), (-77.0, -1.182936302415303), (-74.0, -1.6712164434707135), (-71.0, -0.6996437670397101), (-68.0, -2.61222572481969), (-65.0, -2.8840580605131394), (-62.0, 0.6064440052850679), (-59.0, 1.122592927547138), (-56.0, 3.6525671019872146), (-53.0, -0.6488967425344059), (-50.0, 6.073840311785539), (-47.0, 7.546394567569086), (-44.0, 7.248539328525121), (-41.0, 6.366992893871289), (-38.0, -6.568855844102413), (-35.0, 1.9217233943837269), (-32.0, -2.194340060602978), (-29.0, 2.587665589787904), (-26.0, 3.6322200868899004), (-23.0, -7.019096253161206)]},

    })
    # ensure every dict is a defaultdict
    for k, v in dav.items():
        dav[k] = defaultdict(list)
        dav[k].update(v)

    # '%s' placeholder is later filled with the AP name by the consumer
    vi_path = AP_DATA.joinpath(optrun, 'umic_%s.dat')

    if da_mode == 'none':
        davalues = []

    elif da_mode == 'generate':
        lws = serve.LWS(LWS_INI, cfgoverlay={})
        evaluate.buildDeviceAdaptationArrays(lws, vi_path.parent)

        apid_locid2measurement = np.load(vi_path.parent.joinpath('%s_apid_locid2measurements.npy' % device))#[:1, :]
        apid_locid2estimated = np.load(vi_path.parent.joinpath('%s_apid_locid2estimates.npy' % device))#[:1, :]
        f = open('r:/temp.txt', 'w')
        # NOTE(review): this loop never breaks -- it appears to be a manual
        # experiment (logs improving values until killed); the code after it
        # is unreachable in this mode.  Confirm intended behavior.
        while True:
            davalues, avg_delta = optimize.optimizeDeviceAdaption({device: apid_locid2measurement}, apid_locid2estimated, 100)
            f.write('%.3f %r\n' % (avg_delta, davalues))
            f.flush()

        print 'using davalues: %s' % davalues[device]

        davalues = davalues[device]
    elif da_mode == 'optrun':
        # use the stored values keyed by the optimization run name
        davalues = dav[optrun][device]
        print 'using stored (%s): %s' % (optrun, davalues)
    else:
        # any other value of da_mode is itself a key into the stored values
        davalues = dav[da_mode][device]
        print 'using stored (%s): %s' % (da_mode, davalues)

    env = Environment(objfile=objfile, locationfile=locfile,
        tmpdir=LWS_TMP, aps=aps, vi_path=vi_path, davalues=davalues)
    print 'setup aps in %.3f sec' % (time.time() - t)

    return env
def flatten_errors(errors):
    '''Collect the 2d error component (index 1 of each error tuple) of
    every run of every path, grouped by error-type name.

    Returns a defaultdict(list): error-type name -> list of 2d errors.
    '''
    flat = defaultdict(list)
    for runs in errors.values():
        for run_errors in runs.values():
            for name, err in run_errors.items():
                flat[name].append(err[1])
    return flat
def flatten_errors_singlepath(errors):
    '''Return the 2d errors (index 1 of each error tuple) of the runs of a
    single path, grouped by error-type name.

    :param errors: mapping runid -> {error-type name: error tuple}
    :returns: defaultdict(list): error-type name -> list of 2d errors
    '''
    errors2d = defaultdict(list)
    # fixed: the original rebound the 'errors' parameter inside the loop,
    # shadowing it (worked only because Py2 .items() returns a list)
    for runid, run_errors in errors.items():
        for pname, e in run_errors.items():
            errors2d[pname].append(e[1])
    return errors2d
-
def loop_pf_num_particles(evaluator, pathid2runids, optrun, device):
    '''Sweep the particle filter's particle count and log 2d error
    statistics per error type to numparticles_loop_pf.txt.

    :param evaluator: Evaluator whose localizer exposes num_particles
    :param pathid2runids: mapping pathid -> run ids to evaluate
    :param optrun, device: recorded in the result-file header only
    '''
    #~ evaluator.localizer.replace_below = 1e-40

    f = open(THESIS_RESULTS.joinpath('numparticles_loop_pf.txt'), 'w')
    f.write('start: %s\n' % datetime.datetime.now())
    f.write('device:%s optrun:%s\n' % (device, optrun))
    f.write('%s\n' % evaluator.localizer.params())
    f.flush()

    # NOTE(review): cubewidth is written into each result line but never
    # applied to the evaluator -- confirm whether it should configure the
    # cube width before evaluation.
    for cubewidth in (3,2,1):
        for num_particles in range(100, 10000, 1000):
            evaluator.localizer.num_particles = num_particles

            errors, failures = evaluator.evalAll(pathid2runids)
            errors2d = flatten_errors(errors)
            for pname in ('end', 'seq', 'seq_avg'):
                _errors = errors2d[pname]
                avgerr = sum(_errors) / float(len(_errors))
                # upper median via Py2 integer division
                medianerr = list(sorted(_errors))[len(_errors)/2]
                ff = ','.join('%s/%s' % e for e in failures)
                f.write('type:%s cw:%s nump:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (pname, cubewidth, num_particles, avgerr, medianerr, ff))
                f.flush()
def eval_results_num_particles():
    '''Plot mean/median 2d error (seq_avg only) against particle count,
    read from the file written by loop_pf_num_particles.'''
    # skip the 3 header lines and the (possibly partial) last line
    ll = THESIS_RESULTS.joinpath('numparticles_loop_pf.txt').lines()[3:-1]
    # each line is a space-separated list of key:value pairs
    data = [dict(e.split(':') for e in l.split()) for l in ll]
    xs = []
    ys = []
    for d in data:
        if not d['type'] == 'seq_avg':
            continue
        xs.append(int(d['nump']))
        ys.append((float(d['e2d_mean']), float(d['e2d_median'])))

    lab.clf()
    lab.plot(xs, ys)

    lab.ylim((0, 4))
    #~ lab.xlim((0, 1))
    lab.grid()
    lab.show()
-
def loop_pf_sigmas(evaluator, pathid2runids, optrun, device):
    '''Brute-force sweep over the particle filter's emission and transition
    sigmas; logs 2d error statistics per (esig, tsig) combination to
    sigma_loop_pf.txt (consumed by eval_results_sigmas).

    :param evaluator: Evaluator whose localizer exposes transition_sigmas
        and emission_sigma
    :param pathid2runids: mapping pathid -> run ids to evaluate
    :param optrun, device: recorded in the result-file header only
    '''
    xs = []
    ys = []
    #~ evaluator.localizer.replace_below = 1e-40

    f = open(THESIS_RESULTS.joinpath('sigma_loop_pf.txt'), 'w')
    f.write('start: %s\n' % datetime.datetime.now())
    f.write('device:%s optrun:%s\n' % (device, optrun))
    f.write('%s\n' % evaluator.localizer.params())
    f.flush()

    for esig in np.arange(2, 10, 0.5):

        for tsig in np.arange(2, 10, 0.5):
            # the vertical (z) sigma runs at half the planar sigma
            evaluator.localizer.transition_sigmas = (tsig, tsig, tsig / 2.0)
            evaluator.localizer.emission_sigma = esig

            errors, failures = evaluator.evalAll(pathid2runids)
            errors2d = flatten_errors(errors)
            for pname in ('end', 'seq', 'seq_avg'):
                _errors = errors2d[pname]
                avgerr = sum(_errors) / float(len(_errors))
                # upper median via Py2 integer division
                medianerr = list(sorted(_errors))[len(_errors)/2]
                ff = ','.join('%s/%s' % e for e in failures)
                f.write('type:%s esig:%s tsig:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (pname, esig, tsig, avgerr, medianerr, ff))
                f.flush()

    #~ values.append(avgerr)
    #~ ys.append(values)

    #~ lab.clf()
    #~ lab.plot(xs, ys)

    #~ lab.legend(pnames)
    #~ lab.ylim((0, 4))
    #~ lab.xlim((0, 1))
    #~ lab.grid()
    #~ lab.show()
    #~ if isinstance(x, int):
    #~ lab.savefig('r:/%s_%03d.png' % (fname, x))
    #~ else:
    #~ lab.savefig('r:/%s_%s.png' % (fname, x))
    #~ return
- #~ return
def latex_table_path_detail(errors, combine_forward_back=False, header=''):
    '''Print a LaTeX tabularx table of per-path mean errors, one column per
    (algorithm, error-type) pair, followed by overall mean/stdev rows.

    :param errors: mapping pathid -> {(algo, errtype): mean error}
    :param combine_forward_back: if True, average each path with its
        reversed counterpart (pathid + '_r') into one row
    :param header: optional LaTeX row printed above the column headers
    '''
    # every pathid carries the same (algo, errtype) keys, so take them
    # from an arbitrary entry (Py2: .values()/.keys() return lists)
    algo_errtype = errors.values()[0].keys()

    algo_errtype.sort()

    #~ print algo_errtype

    # human-readable column headers per algorithm/error-type combination
    algo_errs = ['%s/%s' % e for e in algo_errtype]
    headers = {'hmm/seq': 'HMM', 'hmm/seq_avg': 'HMM/avg', 'hmm/end': 'HMM/off',
        'pf/seq': 'PF', 'pf/end': 'PF/off', 'pf/seq_avg': 'PF/avg',
        'lmse/seq': 'LMSE'}
    algo_errs = [headers[e] for e in algo_errs]

    noisedata = errors

    total_means = defaultdict(list)

    rows = []
    for pathid, l in sorted(noisedata.items()):
        # when combining, the reversed path is folded into its forward
        # counterpart below, so skip its own row
        if combine_forward_back and pathid.endswith('_r'):
            continue

        cells = ['{\small %s}' % pathid.replace('_', '-')]
        for (algo, errtype) in algo_errtype:
            mean = l[(algo, errtype)]
            total_means[(algo, errtype)].append(mean)

            if combine_forward_back:
                other_mean = noisedata[pathid + '_r'][(algo, errtype)]
                total_means[(algo, errtype)].append(other_mean)
                cells.append('%.2f' % ((mean + other_mean) / 2))
            else:

                cells.append('%.2f' % mean)


        rows.append(' & '.join(cells) + '\\\\ \\hline')
    print r'\begin{center}'
    print r' \begin{tabularx}{1.008\textwidth}{|X|c|c|c|c|c|c|c|}\hline'
    print r' \rowcolor[gray]{.85}'
    if header:
        print header
    print r' \rowcolor[gray]{.92}'
    print r' \textbf{Path} & ' + ' & '.join(r'{\small %s}' % e.replace('_', '-') for e in algo_errs) + '\\\\ \\hline\\hline'
    for row in rows:
        print ' ' + row

    # overall statistics per column
    mm = []
    vv = []
    for (algo, errtype) in algo_errtype:
        means = total_means[(algo, errtype)]
        mm.append(sum(means) / float(len(means)))
        vv.append(scipy.std(means))
    print r'\hline Mean in $m$ &', ' & '.join('%.2f' % e for e in mm), r'\\ \hline'
    print 'Stdev in $m$ &', ' & '.join('%.2f' % e for e in vv), r'\\ \hline'
    print r' \end{tabularx}'
    print r'\end{center}'
-
def eval_results_sigmas():
    '''Read the sigma-sweep results written by loop_pf_sigmas and render
    two contour plots (offline and online error) over the
    (transition sigma, emission sigma) plane into one PNG.'''
    # skip the 3 header lines and the (possibly partial) last line
    ll = path(THESIS_RESULTS.joinpath('sigma_loop_pf.txt')).lines()[3:-1]

    destfile = THESIS_FIGURES.joinpath('evaluator_brute_force.png')

    # each line is a space-separated list of key:value pairs
    data = [dict(e.split(':') for e in l.split()) for l in ll]
    #~ for d in data:
    #~ print d['e2d_mean'], d['e2d_median']


    aspectRatio = 0.5
    dpi = 90
    width = 1000
    figsize = width / float(dpi), width / float(dpi) * aspectRatio
    fig = plt.figure(figsize=figsize)
    csets = []
    axs = []

    contour_levels = np.linspace(1.8, 3.3, 30)

    for errtype, plotidx, title in [('end', 121, 'Particle Filter offline error (LE)'), ('seq_avg', 122, 'Particle Filter online error (LE)')]:

        ax = fig.add_subplot(plotidx) # projection='3d'
        ax.set_title('%s' % title, y=1.15, fontsize=15)
        axs.append(ax)

        # regular grid onto which the scattered sweep samples are interpolated
        xi = np.linspace(2,10,128)
        yi = np.linspace(2,10,128)


        xs = []
        ys = []
        zs = []
        for d in data:
            if not d['type'] == errtype:
                continue

            #~ print float(d['tsig']), float(d['esig'])
            # drop the low-sigma corner of the sweep
            if float(d['tsig']) < 3.0 or float(d['esig']) < 3:
                continue

            xs.append(float(d['tsig']))
            ys.append(float(d['esig']))
            zs.append(float(d['e2d_mean']))

        zi = griddata((xs, ys), zs, (xi[None,:], yi[:,None]), method='cubic')
        xim, yim = np.meshgrid(xi, yi)

        #~ plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
        #~ print len(zi), len(xi)

        #~ ax.scatter(xs, ys, zs, label='errtype: %s' % errtype)
        #~ ax.plot_wireframe(xim, yim, zi, label='xx')
        #~ ax.plot_surface(xim, yim, zi)
        cset = ax.contourf(xim, yim, zi, 30) #contour_levels
        csets.append(cset)

        # Add the contour line levels to the colorbar
        #~ cbar.add_lines(cset)

        ax.set_xlim((min(xs), max(xs)))
        ax.set_ylim((min(ys), max(ys)))

        ax.set_xlabel('$\sigma$ for Transition Probabilities')
        ax.set_ylabel('$\sigma$ for Emission Probabilities')
        #~ ax.set_zlabel('error')

    fig.subplots_adjust(bottom=0.08, left=0.04, right=0.99, top=0.91)

    # horizontal colorbar above each subplot
    for ax, cset in zip(axs, csets):
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("top", size="5%", pad=0.3)
        ticks=np.arange(1.7, 3.5, 0.05)
        cbar = lab.colorbar(cset, cax, ticks=ticks, orientation='horizontal')
        #~ cbar.ax.set_ylabel('error')


    #~ plt.show()
    fig.savefig(destfile, format='png', dpi=dpi)
    #~ for l in ll:
    #~ if l.startswith('type:')
- #~ eval_results()
# Plot color per (algorithm, error-type) series: 'seq' and 'end' share the
# algorithm's base color, 'seq_avg' uses a lighter variant.
COLORS = {
    ('hmm', 'end'): 'blue',
    ('hmm', 'seq'): 'blue',
    ('hmm', 'seq_avg'): '#88BBFF',
    ('pf', 'end'): 'red',
    ('pf', 'seq'): 'red',
    ('pf', 'seq_avg'): 'orange',
    ('lmse', 'seq'): '#33CC33',
}
-
def plot_results_synthetic():
    '''Plot online/offline 2d localization error over synthetic noise
    levels (two subplots, with per-path stdev bars in a secondary axis
    below each) and print LaTeX detail tables per noise level.'''
    cube_width = 2
    THESIS_RESULTS = path('t:')

    destfile = THESIS_FIGURES.joinpath(r'evaluator_synthetic_%s.png' % cube_width)
    if socket.gethostname() == 'cere-kombinat':

        destfile = path(r'N:\thesis_results').joinpath(r'evaluator_synthetic_%s.png' % cube_width)
        THESIS_RESULTS = path(r'N:\thesis_results')

        # NOTE(review): the two assignments below immediately override the
        # N: paths above -- dead code from switching drives; confirm intent.
        destfile = path(r't:').joinpath(r'evaluator_synthetic_%s.png' % cube_width)
        THESIS_RESULTS = path(r't:')


    # algorithms and the error types recorded for each
    algos = [
        ('pf', ('end', 'seq', 'seq_avg')),
        ('hmm', ('end', 'seq', 'seq_avg')),
        ('lmse', ('seq', ))
    ]


    def _load(algo):
        # result file written by eval_synthetic; skip header + last line
        ll = THESIS_RESULTS.joinpath('synthetic_eval_%s_%s_22.txt' % (cube_width, algo)).lines()[3:-1]
        return [dict(e.split(':') for e in l.split()) for l in ll if l.strip()]

    aspectRatio = 0.55
    dpi = 110
    width = 1000
    figsize = width / float(dpi), width / float(dpi) * aspectRatio
    fig = plt.figure(figsize=figsize)

    errsubtype = 'e2d_mean'

    # which (algo, errtype) series is drawn into which subplot
    plotidx2algo = {121: {('hmm', 'seq'), ('hmm', 'seq_avg'), ('pf', 'seq'), ('pf', 'seq_avg'), ('lmse', 'seq')},
        122: {('hmm', 'end'), ('pf', 'end')}}
    labels = {'hmm/seq': 'HMM', 'hmm/seq_avg': 'HMM/Avg', 'hmm/end': 'HMM', 'pf/seq': 'PF', 'pf/end': 'PF', 'pf/seq_avg': 'PF/Avg', 'lmse/seq': 'LMSE'}
    plotidx2axs = {}
    plotidx2plotvalues = defaultdict(list)

    # noise sigma -> pathid -> (algo, errtype) -> mean error; feeds the
    # LaTeX tables printed at the end
    noise2path2errtype2mean = defaultdict(lambda: defaultdict(dict))

    for plotidx, title in [( 121, '2D Online Error (LE)'), (122, '2D Offline Error (LE)')]:

        ax = fig.add_subplot(plotidx) # projection='3d'
        plotidx2axs[plotidx] = ax

        ax.set_title('%s' % title, y=1.02, fontsize=15)
        min_x = 10**99
        max_x = -10**99

        ys_bypath = defaultdict(list)
        xs_bypath = defaultdict(list)

        for algo, errtypes in algos:
            data = _load(algo)
            for errtype in errtypes:
                if not (algo, errtype) in plotidx2algo[plotidx]:
                    continue

                xs = []
                ys = []


                for d in data:
                    if d['type'] != errtype:
                        continue

                    if 'pathid' in d:
                        # per-path line: collected for the stdev bars below
                        ys_bypath[(algo, errtype, errsubtype, d['pathid'])].append(float(d[errsubtype]))
                        xs_bypath[(algo, errtype, errsubtype, d['pathid'])].append(float(d['sigma']))
                    else:
                        # aggregate line: plotted directly
                        x = float(d['sigma'])
                        xs.append(x)
                        min_x = min(min_x, x)
                        max_x = max(max_x, x)

                        y = float(d[errsubtype])
                        ys.append(y)


                ax.plot(xs, ys, label=labels['%s/%s' % (algo, errtype)], color=COLORS[(algo, errtype)], linewidth=2.0)
                plotidx2plotvalues[plotidx].append((xs, ys))

        #~ print xs_bypath, ys_bypath


        legend = ax.legend(loc='upper left')
        plt.setp(legend.get_texts(), fontsize='medium')
        #~ ltext.fontsize='small'


        ax.grid()
        ax.set_xlim((min_x, max_x))
        ax.set_ylim((0, 4))

        ax.set_ylabel('Localization Error in $m$')
        plt.setp(ax.get_xticklabels(), visible=False)

        # secondary axis below the error plot for per-path stdev bars
        divider = make_axes_locatable(ax)
        ax_path_variance = divider.append_axes("bottom", 1.2, pad=0.1, sharex=ax)

        ax_path_variance.set_xlim((min_x, max_x))
        ax_path_variance.set_ylim((0, 1))
        # blank out the top tick label so it does not collide with the plot above
        ax_path_variance.yaxis.set_major_formatter(FuncFormatter(lambda x, pos=None: '' if x >= 1 else '%.1f' % x))

        ax_path_variance.grid()
        ax_path_variance.set_ylabel('Stdev in $m$')
        ax_path_variance.set_xlabel('Noise $\sigma$')

        #~ NOISE_LEVEL = [0, 3, 6, 9, 12, 15]

        # second pass: group per-path means per noise level and draw one
        # bar group per (algo, errtype), offset by move_right
        move_right = 0
        for algo, errtypes in algos:
            for errtype in errtypes:
                if not (algo, errtype) in plotidx2algo[plotidx]:
                    continue

                means = defaultdict(list)
                for (_algo, _errtype, _errsubtype, pathid), sigmas in xs_bypath.items():

                    for i, sigma in enumerate(sigmas):
                        if (_algo, _errtype, _errsubtype) == (algo, errtype, errsubtype):
                            #~ means[sigma].append(ys_bypath[(algo, errtype, errsubtype, pathid)][i])

                            path_mean = ys_bypath[(algo, errtype, errsubtype, pathid)][i]
                            means[int(sigma)].append(path_mean)

                            noise2path2errtype2mean[sigma][pathid][(algo, errtype)] = path_mean




                # stdev of the per-path means for each noise level
                path_variance = {}
                for sigma, ms in means.items():
                    path_variance[sigma] = scipy.std(means[sigma])
                    #~ mu = sum(means[sigma]) / float(len(means[sigma]))
                    #~ path_variance[sigma] = (sum((m-mu)**2 for m in means[sigma]) / float(len(means[sigma])))**0.5

                # narrower bars in the left subplot (more series there)
                if plotidx == 121:
                    ww = 0.21
                else:
                    ww = 0.4
                xs = [x + move_right * ww - ww/4.0 for x in path_variance.keys()]
                ax_path_variance.bar(xs, path_variance.values(), width=ww, linewidth=0.1, alpha=1.0, color=COLORS[(algo, errtype)])

                move_right += 1

        #~ ax_path_variance.bar(xs_bypath[k], ys_bypath[k], s=1.5, linewidth=0, alpha=0.8)

    fig.subplots_adjust(bottom=0.09, left=0.06, right=0.998, top=0.93)


    # overlay each subplot's curves as gray context lines in the other one
    for xs, ys in plotidx2plotvalues[122]:
        plotidx2axs[121].plot(xs, ys, color='#99AA99')

    for xs, ys in plotidx2plotvalues[121]:
        plotidx2axs[122].plot(xs, ys, color='#99AA99')

    print 'saving to %s' % destfile.abspath()
    fig.savefig(destfile, format='png', dpi=dpi)



    noise = 14
    errors = noise2path2errtype2mean[noise]

    header = r' \multicolumn{8}{|>{\columncolor[gray]{.8}}c|}{\textbf{Online and Offline LEs in $m$ for Noise: $\sigma=%sdBm$ (forward$+$backward)}} \\ \hline' % noise

    #~ latex_table_path_detail(errors, True, header)
    # NOTE(review): the header varies with i, but the data passed is always
    # the noise=14 slice -- probably meant noise2path2errtype2mean[i];
    # confirm before relying on these tables.
    for i in range(0, 18, 4):
        header = r' \multicolumn{8}{|>{\columncolor[gray]{.8}}c|}{\textbf{Online and Offline LEs in $m$ for Noise: $\sigma=%sdBm$ (forward$+$backward)}} \\ \hline' % i
        latex_table_path_detail(noise2path2errtype2mean[noise], False, header)
-
- #~ plot_results_synthetic()
- #~ sys.exit()
-
- def eval_synthetic():
- TRACKED_PATH = LWS_TMP.joinpath(r'tracked_path_synthetic')
- THESIS_RESULTS = path('t:/')
-
- env = setupEnv('iconia_1', 'iconia')
-
- pfconfig = {'num_particles': 20000, 'do_blocking': True} # 'smooth': 1.0
- hmmconfig = {'prune_to': 15000, 'freespace_scan': -1} # 'smooth': 1.0
- errortype = 'mean'
- LIMIT = 20
- NOISE_LIMIT = 11
- NOISE_LIMIT_LOWER = 10
- NOISE_STEP = 1
-
- cube_width = 2
-
- algos = [
- #~ ('pf', ('end', 'seq', 'seq_avg')),
- ('hmm', ('end', 'seq', 'seq_avg')),
- #~ ('lmse', ('seq', )),
- ]
-
- algo2f = {}
- for algo, errnames in algos:
-
- outfile = THESIS_RESULTS.joinpath('synthetic_eval_%s_%s_22.txt' % (cube_width, algo))
- print 'storing to %s' % outfile.abspath()
-
- f = open(outfile, 'w')
- f.write('start: %s\n' % datetime.datetime.now())
- f.write('device:%s optrun:%s\n' % ('iconia', 'inconia_1'))
- f.flush()
- algo2f[algo] = f
-
-
- for i, sigma in enumerate(np.arange(NOISE_LIMIT_LOWER, NOISE_LIMIT, NOISE_STEP)):
- for algo, errnames in algos:
- f = algo2f[algo]
- print sigma
- device = 'sigma_%.1f' % sigma
-
- evaluator = Evaluator(optrun, device, env, cube_width, TRACKED_PATH,
- algo=algo, verbose=False, output_html=False,
- errortype=errortype, interpolate=False,
- hmmconfig=hmmconfig,
- pfconfig=pfconfig
- )
- if i == 0:
- f.write('%s\n' % evaluator.localizer.params())
-
- pathid2runids = evaluate.getCollectedPathids2runids(device, CONFIGURED_PATHS, TRACKED_PATH, None)
-
- errors, failures = evaluator.evalAll(pathid2runids, limit=LIMIT)
-
-
- errors2d = flatten_errors(errors)
- for errname in errnames:
- _errors = errors2d[errname]
- avgerr = sum(_errors) / float(len(_errors))
- medianerr = list(sorted(_errors))[len(_errors)/2]
- ff = ','.join('%s/%s' % e for e in failures)
- f.write('type:%s sigma:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (errname, sigma, avgerr, medianerr, ff))
-
- for pid, p_errors in errors.items():
- errors2d = flatten_errors_singlepath(p_errors)
- for errname in errnames:
- _errors = errors2d[errname]
- avgerr = sum(_errors) / float(len(_errors))
- medianerr = list(sorted(_errors))[len(_errors)/2]
- ff = ','.join('%s/%s' % e for e in failures)
- f.write('pathid:%s type:%s sigma:%s e2d_mean:%.3f e2d_median:%.3f fail:%s\n' % (pid, errname, sigma, avgerr, medianerr, ff))
- f.write('\n')
-
- f.flush()
- print '---'
-
def eval_results_real(scenes, auto_plot=False):
    '''Run the evaluator on real (collected) measurement runs for every
    enabled scene and pickle the resulting error dicts to THESIS_RESULTS.

    :param scenes: OrderedDict scene_name -> scene configuration dict;
        recognized keys: 'on', 'hmmconfig', 'pfconfig', 'interpolate',
        'optrun', 'device', 'algo', 'cubewidth', 'damode', 'tracked',
        'limit'
    :param auto_plot: if True, plot the finished scenes afterwards
    '''
    THESIS_RESULTS = path('t:/')
    TRACKED_PATH = path(r'D:\loco-dev\dirk\thesis\results\tracked_path')

    logging.basicConfig(level=logging.DEBUG)

    finished_scenes = OrderedDict()
    for scene_name, scene in scenes.items():
        # scenes can be switched off without removing them
        if not scene.get('on', True):
            continue

        hmmconfig = scene.get('hmmconfig', {})
        pfconfig = scene.get('pfconfig', {})
        interpolate = scene.get('interpolate', False)
        errortype = 'mean'
        optrun = scene['optrun']
        device = scene['device']
        algo = scene['algo']
        cube_width = scene['cubewidth']
        damode = scene.get('damode', 'none')
        tracked_path = scene.get('tracked', TRACKED_PATH)
        limit = scene.get('limit', None)
        env = setupEnv(optrun, device, damode)



        evaluator = Evaluator(optrun, device, env, cube_width, tracked_path,
            algo=algo, verbose=False, output_html=False,
            errortype=errortype, interpolate=interpolate,
            hmmconfig=hmmconfig,
            pfconfig=pfconfig
            )
        pathid2runids = evaluate.getCollectedPathids2runids(device, CONFIGURED_PATHS, tracked_path, None)
        # drop stationary runs; Py2 .keys() returns a list copy, so popping
        # while iterating is safe here
        for pid in pathid2runids.keys():
            if pid.startswith('still_'):
                pathid2runids.pop(pid)

        errors, failures = evaluator.evalAll(pathid2runids, limit=limit)

        f = open(THESIS_RESULTS.joinpath('%s.errors' % scene_name), 'wb')

        # convert nested defaultdicts to plain dicts so they pickle cleanly
        for k, v in errors.items():
            errors[k] = dict(v)
            for k2, v2 in errors[k].items():
                errors[k][k2] = dict(v2)

        dumpable = dict(errors)
        pickle.dump(dumpable, f)
        f.close()

        finished_scenes[scene_name] = scene

    if auto_plot:
        plot_results_real(finished_scenes)
def plot_results_real(scenes):
    """Render a grouped bar chart of mean localization error per scene.

    Loads the pickled '<scene_name>.errors' files written by
    eval_results_real(), aggregates the errors per scene and error type,
    draws bars with std-deviation error bars plus per-scene captions,
    saves the figure as a PNG, and finally emits a LaTeX detail table
    via latex_table_path_detail().

    The *_OFFSET/*_SIZE tables below hand-tune text placement for the
    scene counts actually used in the thesis figures (1-4 and 18).
    """
    THESIS_RESULTS = path('t:/')

    # Scene-caption x offset, keyed by the number of scenes in the plot.
    SCENE_COUNT2TEXT_OFFSET = defaultdict(float)
    SCENE_COUNT2TEXT_OFFSET.update({1:1, 2: 1.5, 3: 1.7, 4: 1.8, 18: 1.5})

    # Scene-caption font size, keyed by the number of scenes.
    SCENE_COUNT2TEXT_SIZE = defaultdict(lambda: 20)
    SCENE_COUNT2TEXT_SIZE.update({3: 15, 4: 15, 18: 14})

    # Per-bar value-label x offset, keyed by the number of scenes.
    SCENE_COUNT2LABEL_OFFSET = defaultdict(lambda: 0.23)
    SCENE_COUNT2LABEL_OFFSET.update({4: 0.34})

    # pathid -> {(algo, errtype): mean error} -- fed to the LaTeX table.
    path_errors = defaultdict(dict)

    # scene name -> errtype -> flat list of errors across all paths.
    scene_errors = defaultdict(lambda: defaultdict(list))
    for scene_name, scene in scenes.items():

        # NOTE(review): file handle is never closed.
        f = open(THESIS_RESULTS.joinpath('%s.errors' % scene_name), 'rb')
        errors = pickle.load(f)
        for pid, _errors in errors.items():
            #~ print pid, _errors.keys()

            for errtype, es in flatten_errors_singlepath(_errors).items():
                # A scene may restrict itself to a single error type.
                if 'filter_err' in scene and scene['filter_err'] != errtype:
                    continue
                scene_errors[scene_name][errtype].extend(es)
                path_errors[pid][(scene['algo'], errtype)] = scipy.mean(es)

    # Wide, flat figure: 1000px at 90dpi with a 0.30 height/width ratio.
    aspectRatio = 0.30
    dpi = 90
    width = 1000
    figsize = width / float(dpi), width / float(dpi) * aspectRatio
    fig = plt.figure(figsize=figsize)

    ax = fig.add_subplot(111) # projection='3d'


    # Build the bar data: x positions, mean heights, std error bars.
    xs = []
    ys = []
    # NOTE(review): 'colors' is collected but never used -- ax.bar below
    # hard-codes color='#DDDDDD'.
    colors = []
    err_std = []
    x = 0
    x_labels = defaultdict(str)   # bar x position -> 'algo/errtype'
    scenetexts = []               # (x position, scene name) for captions
    for scene_name, scene in scenes.items():
        errors = scene_errors[scene_name]

        scenetexts.append((x, scene_name))

        for errtype, es in errors.items():
            print scene_name, errtype, scipy.mean(es)
            xs.append(x-0.4)
            ys.append(scipy.mean(es))
            err_std.append(scipy.std(es))
            colors.append(COLORS[(scene['algo'], errtype)])
            x_labels[x] = '%s/%s' % (scene['algo'], errtype)
            x += 1

        # Gap between scene groups: explicit per-scene 'skip' override,
        # otherwise a default that depends on how crowded the plot is.
        if 'skip' in scene:
            x += scene['skip']
        elif len(scenes) < 4:
            x += 1
        else:
            x += 0.4

    # Head room above the tallest bar plus its error bar.
    ylimit = (max(ys) + max(err_std))*1.1
    for x, scene_name in scenetexts:
        if scenes[scene_name].get('use_title'):
            title = scenes[scene_name].get('title', scene_name)
        else:
            title = scene_name


        # Caption offset: tuned tables for 3-bar groups / 18-scene plots,
        # fixed 0.8 otherwise.
        if len(scene_errors[scene_name]) == 3 or len(scenetexts) == 18:
            o = SCENE_COUNT2TEXT_OFFSET[len(scenetexts)]
            if len(scenetexts) == 18:
                # Nudge long/short titles so they stay over their group.
                if len(title) > 11:
                    o += 0.55
                elif len(title) < 6:
                    o -= 0.2
        else:
            o = 0.8

        if scenes[scene_name].get('no_title'):
            continue


        plt.text(x + o,
                 ylimit, title, size=SCENE_COUNT2TEXT_SIZE[len(scenetexts)],
                 ha="right", va="top",
                 bbox = dict(boxstyle="square",
                             ec=(1., 0.8, 0.8),
                             fc=(1., 0.98, 0.98),
                             )
                 )


    fig.subplots_adjust(bottom=0.08, left=0.06, right=0.99, top=0.91)

    # Tick labels: verbose 'ALGO/setting' for small plots, terse names
    # for crowded ones.
    if len(scenes) < 10:
        labels = {'hmm/seq': 'HMM/on', 'hmm/seq_avg': 'HMM/avg', 'hmm/end': 'HMM/off',
                  'pf/seq': 'PF/on', 'pf/end': 'PF/off', 'pf/seq_avg': 'PF/avg',
                  'lmse/seq': 'LMSE', '':''}
    else:
        labels = {'hmm/seq': 'HMM', 'hmm/seq_avg': 'HMM', 'hmm/end': 'HMM',
                  'pf/seq': 'PF', 'pf/end': 'PF', 'pf/seq_avg': 'PF/avg',
                  'lmse/seq': 'LMSE', '':''}
    ax.xaxis.set_major_locator(FixedLocator(x_labels.keys()))
    ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos=None: labels[x_labels[x]]))

    if len(scenes) > 10:
        for tick in ax.xaxis.get_major_ticks():
            tick.label1.set_fontsize(12)

    rects = ax.bar(xs, ys, width=0.8, color='#DDDDDD', yerr=err_std,
                   error_kw=dict(elinewidth=6, ecolor='grey'))
    if len(scenes) < 4:
        ax.set_xlim(-1, max(xs) + 1.3)
    else:
        ax.set_xlim(-0.6, max(xs) + 1)

    ax.set_ylim(0, ylimit)



    ax.set_ylabel('Localization Error in $m$')

    def autolabel(rects):
        # attach some text labels
        for rect in rects:
            height = rect.get_height()
            o = SCENE_COUNT2LABEL_OFFSET[len(scenetexts)]
            if len(scenes) > 10:
                o += 0.07
            plt.text(rect.get_x() + rect.get_width() / 2.0 + o,
                     height*1.02,
                     ('%.2f' % height) if len(scenes) < 10 else ('%.1f' % height),
                     ha='center', va='bottom', fontsize= 11 if len(scenetexts) > 3 else 12)
    autolabel(rects)
    ax.grid(axis='y')

    namebase = '_'.join(scenes.keys())

    # NOTE(review): joinpath() is handed an absolute 't:/...' argument;
    # presumably this collapses to the drive path and THESIS_FIGURES is
    # effectively ignored -- confirm against the path library used.
    destfile = THESIS_FIGURES.joinpath(r't:/%s.png' % namebase)
    print 'saving to %s' % destfile.abspath()
    fig.savefig(destfile, format='png', dpi=dpi)
    #~ D:\loco-dev\dirk\thesis\figures

    #~ fig.savefig(destfile, format='png', dpi=dpi)

    latex_table_path_detail(path_errors, True)
-
def parse_scenes(s, scenes):
    """Select scenes from *scenes* by a whitespace-separated spec string.

    Each token in *s* names a key of *scenes*; a leading '#' keeps the
    scene in the result but marks it disabled ('on': False), mimicking a
    commented-out entry.

    Note: the selected scene dicts are shared with *scenes* and mutated
    in place (the 'on' flag is written into them).

    Raises KeyError for an unknown scene name.  (The original signalled
    this by printing the name and crashing on the undefined identifier
    UNKNOWN_SCENE, i.e. an accidental NameError.)
    """
    res = OrderedDict()
    for token in s.split():
        on = not token.startswith('#')
        scene_name = token if on else token[1:]

        if scene_name not in scenes:
            raise KeyError('unknown scene: "%s"' % scene_name)

        res[scene_name] = scenes[scene_name]
        res[scene_name]['on'] = on
    return res
-
def define_scenarios():
    """Build all evaluation scene configurations and return the subset
    selected by the spec string *s* (see parse_scenes()).

    A scene dict bundles everything eval_results_real() needs: the
    optimization run, device, localization algorithm, filter configs,
    cube width, data-augmentation mode, plus plotting hints consumed by
    plot_results_real() ('title', 'skip', 'filter_err', ...).
    """
    scenes = OrderedDict()

    # --- synthetic baseline scenes ------------------------------------
    scenes['synth_old'] = {
        'optrun': 'iconia_1',
        'device': 'sigma_0.0',
        'algo': 'hmm',
        'tracked': LWS_TMP.joinpath(r'tracked_path_synthetic_old')
    }

    scenes['synth_new'] = d = dict(scenes['synth_old'])
    d['tracked'] = LWS_TMP.joinpath(r'tracked_path_synthetic')

    scenes['synth_interp_old'] = d = dict(scenes['synth_old'])
    d['interpolate'] = True

    # --- shared algorithm configs -------------------------------------
    pfconfig = {'num_particles': 15000,
                'do_blocking': False,
                'transition_sigmas': (4.0, 4.0, 2.0),
                #~ 'max_replace': 5000,
                #~ 'replace_below': 1e-10,
                #~ 'smooth': 1.0,
                #~ 'turnoff_blocking':1e1,
                }

    hmmconfig = {'prune_to': 8000, 'freespace_scan': -1}

    basic = {'optrun': 'iconia_1',
             'device': 'iconia',
             'pfconfig': pfconfig,
             'hmmconfig': hmmconfig,
             'cubewidth': 2,
             'damode': 'none'
             }

    nexus = {'optrun': 'all_33',
             'device': 'nexus',
             'pfconfig': pfconfig,
             'hmmconfig': hmmconfig,
             'cubewidth': 2,
             'interpolate': True,
             'damode': 'none'
             }

    # --- Iconia scenes: HMM/PF/LMSE with interpolation + DA variants --
    scenes['hmm_basic'] = hmm_basic = dict(basic)
    hmm_basic['algo'] = 'hmm'
    #~ hmm_basic['cubewidth'] = 3

    scenes['hmm_basic_ip'] = hmm_basic_ip = dict(hmm_basic)
    hmm_basic_ip['interpolate'] = True
    hmm_basic_ip['title'] = 'Iconia/HMM'

    scenes['hmm_basic_da'] = hmm_basic_da = dict(hmm_basic)
    hmm_basic_da['damode'] = 'generate'
    hmm_basic_da['title'] = 'Iconia/HMM'

    scenes['hmm_basic_ip_da'] = hmm_basic_ip_da = dict(hmm_basic_ip)
    hmm_basic_ip_da['damode'] = 'optrun'

    scenes['pf_basic'] = pf_basic = dict(basic)
    pf_basic['algo'] = 'pf'

    scenes['pf_basic_ip'] = pf_basic_ip = dict(pf_basic)
    pf_basic_ip['interpolate'] = False
    pf_basic_ip['title'] = 'Iconia/PF'

    scenes['pf_basic_da'] = pf_basic_da = dict(pf_basic)
    pf_basic_da['damode'] = 'generate'

    scenes['pf_basic_ip_da'] = pf_basic_ip_da = dict(pf_basic_ip)
    pf_basic_ip_da['damode'] = 'optrun'

    scenes['lmse_basic'] = lmse_basic = dict(basic)
    lmse_basic['algo'] = 'lmse'
    lmse_basic['limit'] = 5

    scenes['lmse_basic_ip'] = lmse_basic_ip = dict(lmse_basic)
    lmse_basic_ip['interpolate'] = True
    lmse_basic_ip['title'] = 'LMSE'

    scenes['lmse_basic_ip_da'] = lmse_basic_ip_da = dict(lmse_basic_ip)
    lmse_basic_ip_da['damode'] = 'optrun'

    # --- Nexus scenes -------------------------------------------------
    scenes['pf_nexus_all'] = pf_nexus_all = dict(nexus)
    pf_nexus_all['algo'] = 'pf'
    pf_nexus_all['title'] = 'Nexus/PF'

    scenes['hmm_nexus_all'] = hmm_nexus_all = dict(nexus)
    hmm_nexus_all['algo'] = 'hmm'
    hmm_nexus_all['title'] = 'Nexus/HMM'

    scenes['pf_nexus_basic'] = pf_nexus_basic = dict(pf_nexus_all)
    pf_nexus_basic['optrun'] = 'iconia_1'

    scenes['hmm_nexus_basic'] = hmm_nexus_basic = dict(hmm_nexus_all)
    # BUGFIX: key was misspelled 'obtrun', so the override was silently
    # ignored and the scene kept the inherited optrun 'all_33' (compare
    # pf_nexus_basic above).
    hmm_nexus_basic['optrun'] = 'iconia_1'

    # Iconia scenes re-evaluated against the all-device optimization run.
    for t in 'pf_basic_ip', 'hmm_basic_ip':
        scenes[t.split('_')[0] + '_iconia_all'] = d = dict(scenes[t])
        d['optrun'] = 'all_33'
        d['damode'] = 'none'

    # High-resolution data-augmentation variants.
    for t in 'lmse_basic_ip_da', 'pf_basic_ip_da', 'hmm_basic_ip_da':
        scenes[t + '_hr'] = d = dict(scenes[t])
        d['optrun'] = 'iconia.da_1'
        d['damode'] = 'optrun'

    # Scene selection specs; only the LAST assignment to *s* is
    # effective -- the earlier ones are kept as a history of the figure
    # variants that were plotted.
    s = '#hmm_basic #hmm_basic_ip_da #hmm_basic_ip #hmm_basic_da'
    s = '#lmse_basic_ip_da hmm_basic_ip_da #pf_basic_ip_da'
    s = '#lmse_basic_ip #hmm_basic_ip #pf_basic_ip'
    s = ' hmm_nexus_all pf_nexus_all hmm_iconia_all pf_iconia_all '

    # --- coarse-model comparison scenes -------------------------------
    coarse_models = [
        ('full2', 'iconia.all2mat_2', 'Full2'),
        ('basic1', 'iconia.basic1mat_1', 'Basic1'),
        ('full1', 'iconia.all1mat_1', 'Full1'),
        ('basic2', 'iconia.basic2mat_2', 'Basic2'),
        ('basicdoors5', 'iconia.basic5mat_1', 'Basic+Doors5'),
        ('full11', 'iconia_1', 'Full11')]

    for name, optrun, title in coarse_models:
        # The LMSE bar carries the model title; the PF/HMM bars stay
        # untitled and only contribute their 'end' error.
        scenes[name + '_lmse'] = d = dict(lmse_basic_ip)
        d['optrun'] = optrun
        d['title'] = title
        d['damode'] = 'none'
        d['skip'] = -0.1

        scenes[name + '_pf'] = d = dict(pf_basic_ip)
        d['optrun'] = optrun
        d['damode'] = 'none'
        d['filter_err'] = 'end'
        d['title'] = ''
        d['skip'] = -0.1

        scenes[name + '_hmm'] = d = dict(hmm_basic_ip)
        d['optrun'] = optrun
        d['damode'] = 'none'
        d['filter_err'] = 'end'
        d['title'] = ''
        d['skip'] = 0.2

    res = parse_scenes(s, scenes)

    # Global overrides for the selected scenes.
    for scene in res.values():
        scene['limit'] = 2
        scene['use_title'] = True
        #~ scene['no_title'] = True
        #~ scene['damode'] = 'optrun'

    return res
-
- eval_results_real(define_scenarios(), auto_plot=True)
- plot_results_real(define_scenarios())
- sys.exit()
if __name__ == '__main__':
    # Scratch driver for single-path / synthetic experiments.
    # NOTE(review): unreachable in the file's current state -- the
    # module-level sys.exit() above fires before this guard is evaluated.

    optrun = 'all_5'
    optrun = 'iconia_1'   # later assignment wins
    device = 'iconia'
    TRACKED_PATH = LWS_TMP.joinpath('tracked_path_synthetic')

    device = 'sigma_00'   # overrides 'iconia' above


    logging.basicConfig(level=logging.DEBUG)
    np.set_printoptions(linewidth=200, threshold=1000)
    cython_annotate('utils/accelerated.pyx')


    eval_synthetic()

    sys.exit()
    # -- everything below this exit is dead code kept as experiment notes --


    env = setupEnv(optrun, device)


    #~ import utils.accelerated as speed
    HMMLocalizer.no_signal_delta = 0

    # Overlay for the LWS server config (see the commented server.LWS
    # instantiation below).
    cfgoverlay = {'scenes': {'umic': {'objfile': OBJFILE, 'locationfile': LOCFILE}},
                  'tmp_apdata': AP_DATA,
                  'tmp_optruns': r'N:\lws-instance\tmp\_optruns',
                  'tmp_tracked': TRACKED_PATH,
                  }

    pathid = 'og1_classic'
    pathid = 'og1_eg_right'   # later assignment wins

    filter_pathid = 'og1_eg_right'
    filter_pathid = 'og1_classic'   # later assignment wins
    #~ filter_pathid = 'og1'

    #~ s = lws.evaluate(optrun, device, 3, refresh=True, filter=filter_pathid)

    #~ s = lws.getLocationsAndMeasurements('iconia')
    #~ print s

    #~ lws = server.LWS(r'D:\loco-dev\dirk\lws\instance_nostromo\lws.ini', cfgoverlay=cfgoverlay)


    #~ s = lws.evaluate(device, optrun, 3, refresh=True, filter=filter_pathid)

    #~ sys.exit()

    #~ open('r:/out.html', 'w').write(s.replace('src="/static/', r'src="D:/loco-dev/dirk/lws/static/'))


    #~ s = lws.evaluatePath(optrun, device, pathid=pathid, runid="05", setting="end", cubewidth=3, refresh=True)
    #~ s = s.replace('&', '&')
    #~ tree = StringToTree(s)
    #~ for e in tree.xpath('//td[@id="imgs1"]/img'):
    #~ e.attrib['src'] = r'D:\loco-dev\dirk\tmp\evaluator\%s\%s_3_end_%s_05.png' % (optrun, device, pathid)
    #~ for e in tree.xpath('//td[@id="imgs2"]/img'):
    #~ e.attrib['src'] = r'D:\loco-dev\dirk\tmp\evaluator\%s\%s_3_seq_%s_05.png' % (optrun, device, pathid)

    #~ s = TreeToString(tree)
    #~ open('r:/out_path.html', 'w').write(s.replace('src="/static/', r'src="D:/loco-dev/dirk/lws/static/'))
    #~ errortype = 'mean'
    pfconfig = {'num_particles': 10000, 'do_blocking': True, 'smooth': 1.0}
    evaluator = Evaluator(optrun, device, env, 1, TRACKED_PATH,
                          algo='lmse', verbose=False, output_html=False, pfconfig=pfconfig,
                          errortype='mean', interpolate=False)

    #~ pathid2runids = {'og1_classic': ('05', '06', '07'),
    #~ 'og1_straight': ('02', '07'),
    #~ }

    #~ pathid2runids = {'og1_classic_r': ('02',)}
    pathid2runids = {'og1_classic': ('01',)}
    #~ pathid2runids = {'og1_long_rooms': ('09',)}
    #~ pathid2runids = {'og1_eg_right': ('11',)}

    #~ pathid2runids = evaluate.getCollectedPathids2runids(device, CONFIGURED_PATHS, TRACKED_PATH, None)
    #~ print pathid2runids
    #~ pathid2runids = {'stairs_upward_r': ('01',)}

    #~ errors = evaluator.evalAll(pathid2runids, limit=5)
    #~ loop_pf_sigma(evaluator, pathid2runids, optrun, device)
    #~ eval_results_sigmas()

    #~ loop_pf_num_particles(evaluator, pathid2runids, optrun, device)
    #~ eval_results_num_particles()

    #~ errors2d = flatten_errors(errors)
    #~ for pname, errors in errors2d:

    #~ cubewidth = 1
    localizer = HMMLocalizer(env, cubewidth=2, prune_to=10000, num_threads=4, verbose=False)
    #~ localizer.test()

    #~ cubewidth = 3
    #~ localizer = LMSELocalizer(env, cubewidth=cubewidth, num_threads=4, verbose=False)
    # Evaluate one recorded measurement file through the HMM localizer.
    m = Measurements()
    m.load(r'D:\loco-dev\dirk\tmp\tracked_path_synthetic\sigma_0.0\og1_classic\measurements_00.txt')
    #~ m.load(r'R:\xx.txt')
    print localizer.evaluateMeasurements(m)[0]
    #~ for r in localizer.decoder.history[0]:
    #~ print r
|