# -*- coding: utf-8 -*-
import sys
import time
from collections import namedtuple, defaultdict
import logging
import math
import datetime
import Queue
import tempfile
import threading
import thread
import hashlib
from zipfile import ZipFile
import urllib
import pylab
from matplotlib import pyplot as plt
import numpy as np
# from scipy.misc import pilutil
import scipy.misc as pilutil
from PIL import Image, ImageDraw
from scipy import ndimage
from concurrent import futures
from utils import normalizeLogarithmic, normalizeDeviceAdaption, loadLocations
import utils.accelerated as speed
from utils import daemonthread
from utils.path import path
from utils.volumeimage import VolumeImage
from utils.objparser import Mesh
from utils.xml import StringToTree, subelement, TreeToFile, TreeToString
from utils.materials import materialsAsXml
from utils.url import getOpener
from intersect import Mesh2D
# threaded access from volview gui
try:
from pyface.api import GUI
except ImportError:
GUI = None
lock = threading.Lock()
log = logging.getLogger('lws')
mpart_opener = getOpener(enableMultipart=True)
opener = getOpener()
#~ davalues = [(-90.0, 0.1), (-80.0, -4.8), (-70.0, -1.1), (-60.0, -1.4), (-50.0, 4.6), (-40.0, 8.6), (-30.0, 4.8), (0.0, -0.7)]
#~ davalues = [(-90.0, 7.7), (-85.0, -6.2), (-80.0, -3.7), (-75.0, -4.8), (-70.0, -3.3), (-65.0, -8.4), (-60.0, -4.2), (-55.0, -4.8), (-50.0, -4.5), (-45.0, 2.0), (-40.0, -0.2), (-30.0, -4.4), (-20.0, -6.1), (0.0, 1.0)]
def errorsFromLists(p1, p2, mode='3d'):
    ''' pairwise euclidean errors between two lists of points;
    mode='2d' ignores the z component '''
errors = []
if mode == '3d':
for (x1, y1, z1), (x2, y2, z2) in zip(p1, p2):
error = ((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2)**0.5
errors.append(error)
else:
for (x1, y1, z1), (x2, y2, z2) in zip(p1, p2):
error = ((x2-x1)**2 + (y2-y1)**2)**0.5
errors.append(error)
return errors
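# Hedged usage sketch for errorsFromLists (illustrative numbers, not from a
# real run): per-point euclidean error between a ground truth path and an
# estimated path; mode='2d' ignores the z component.
def _example_errors_from_lists():
    truth = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]
    estimate = [(3.0, 4.0, 0.0), (1.0, 1.0, 3.0)]
    assert errorsFromLists(truth, estimate) == [5.0, 2.0]
    assert errorsFromLists(truth, estimate, mode='2d') == [5.0, 0.0]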
class AccessPoint(object):
def __init__(self, name, x, y, z, volumeImagePath, davalues):
t = time.time()
self.name = name
self.x = x
self.y = y
self.z = z
self.davalues = list(davalues)
self.vi = None
self.dbdata = None
self.loaded = False
# interpolate name or use plain
self.datfile = path((volumeImagePath % name) if '%s' in volumeImagePath else volumeImagePath)
if not self.datfile.exists():
            log.warn('%s does not exist' % self.datfile.abspath())
else:
try:
self.vi = VolumeImage(self.datfile)
# volume image data in decibel values
#~ self.dbdata = self.vi.asDecibelData(-130)
self.dbdata = speed.toNormalizedDecibelwithDeviceAdaption(self.vi.data, davalues)
#~ log.debug('loaded ap %s from %s in %.2f sec' % (name, self.datfile.abspath(), time.time() - t))
self.loaded = True
except Exception:
log.exception('error during loading AP %s from %s' % (name, self.datfile.abspath()))
raise
Dimensions = namedtuple('Dimensions', 'tx ty tz sx sy sz')
class Environment(object):
def __init__(self, objfile=None, di=None, aps=(), davalues=None, locationfile=None, tmpdir=None, vi_path=None, bbox2d=None):
''' setup localization environment
        @param objfile: path to the .obj file that holds the geometry
@type objfile: str or path instance
@param di: dimensions of scene - will be retrieved from APs if omitted
@type di: Dimensions()
@param aps: list of aps
@type aps: [(apid, x, y, z), ] or [AccessPoint()]
@param locationfile: path to configured locations
@type locationfile: str or path instance
@param vi_path: path to volumeimages - if path contains a "%s" the apid will be interpolated
        @type vi_path: str
'''
if tmpdir is None:
self.tmpdir = path(tempfile.gettempdir())
else:
self.tmpdir = path(tmpdir)
self.vi_path = vi_path
log.debug('using vis from %s' % vi_path)
t = time.time()
self.objfile = path(objfile)
if objfile:
self.mesh = Mesh(objfile)
log.debug('parsed objfile in %.1f secs' % (time.time() - t))
# HARDCODE
if bbox2d is None:
# 2d mesh cut
mesh_xlim = (-1, 58)
mesh_ylim = (-1, 18)
else:
mesh_xlim = int(bbox2d[0]), int(bbox2d[1])
mesh_ylim = int(bbox2d[2]), int(bbox2d[3])
pixel_per_meter = 16
self.mesh2d = Mesh2D(self.mesh, mesh_xlim, mesh_ylim, pixel_per_meter)
# dimensions values of environment
# will be autoloaded from volumeimages of APs if not given explicitly
self.di = di
self.aps = {}
if len(aps) > 0:
if not isinstance(aps[0], tuple):
# already constructed APs
for ap in aps:
self.addAP(ap)
else: # tuple-form (apid, x, y, z)
                assert vi_path is not None, 'vi_path must be given if AccessPoints are to be constructed'
                assert davalues is not None, 'davalues must be given if AccessPoints are to be constructed'
t = time.time()
#~ for apid, x, y, z in aps:
#~ try:
#~ self.addAP(AccessPoint(apid, x, y, z, vi_path, davalues))
#~ except Exception:
#~ pass
                with futures.ThreadPoolExecutor(max_workers=4) as executor:
                    for apid, x, y, z in aps:
                        f = executor.submit(AccessPoint, apid, x, y, z, vi_path, davalues)
                        # an AP that fails to load raises in the callback and is skipped
                        f.add_done_callback(lambda f: self.addAP(f.result()))
                log.debug('loaded %s [of %s] aps in %.3f secs' % (len(self.aps), len(aps), time.time() - t))
self.locationfile = locationfile
if locationfile is not None:
self.locid2location = loadLocations(locationfile)
else:
self.locid2location = None
def getCutFile(self, z):
lcf = self.tmpdir.joinpath(path('cut_%.0f.png' % z))
with lock:
if not lcf.exists():
log.debug('building 2d cut for z: %.0f' % z)
to_be_combined = []
for objname in self.mesh2d.objnames:
img = self.mesh2d.cut(objname, z)
to_be_combined.append(pilutil.fromimage(img))
all_objects = np.add.reduce(np.array(to_be_combined))
log.debug('cache file to %s' % lcf.abspath())
pilutil.imsave(lcf, all_objects)
return lcf
def addAP(self, ap):
''' adds AccessPoint volumeimage data by .dat file'''
self.aps[ap.name] = ap
        # use dimensions of first added AP and assert all following dimensions are the same
        if ap.vi is None:
            # probably an AP without data; nothing to derive dimensions from
            return
di = ap.vi.di
di = Dimensions(sx=di.sx, sy=di.sy, sz=di.sz, tx=di.tx, ty=di.ty, tz=di.tz)
if self.di is None:
self.di = di
else:
assert self.di == di
def replaceAPs(self, newVolumeImagePath):
for name, ap in self.aps.items():
self.aps[name] = AccessPoint(name, ap.x, ap.y, ap.z, volumeImagePath=newVolumeImagePath, davalues=ap.davalues)
def translateToVoxel(self, x, y, z, cube_width=1):
        '''translate float real world coordinates into int voxel coordinates
        using the translation (tx, ty, tz) and scale (sx, sy, sz) of self.di'''
return (int(round((x - self.di.tx) / self.di.sx)) / cube_width,
int(round((y - self.di.ty) / self.di.sy)) / cube_width,
int(round((z - self.di.tz) / self.di.sz)) / cube_width)
def translateToMeter(self, x, y, z):
        '''translate int voxel coordinates into float real world coordinates
        using the translation (tx, ty, tz) and scale (sx, sy, sz) of self.di'''
return (x * self.di.sx + self.di.tx,
y * self.di.sy + self.di.ty,
z * self.di.sz + self.di.tz)
def cube2meter(self, x, y, z, cubewidth):
#~ print x,y,z
        x = x * cubewidth  #~ + cubewidth / 2.0
        y = y * cubewidth  #~ + cubewidth / 2.0
        z = z * cubewidth  #~ + cubewidth / 2.0
#~ print x,y,z
# estimated x, y, z in real world coords
return self.translateToMeter(x, y, z)
def translatePixelToMesh2dPixel(self, x, y):
''' translate self.vi int coordinates into mesh2d pixel coordinate space '''
in_m_y, in_m_x, _ = self.translateToMeter(x, y, 0)
return self.mesh2d.meter2pixel(in_m_x, in_m_y)
def translateMeterToMesh2dPixel(self, x, y):
''' translate self.vi meter coordinates into mesh2d pixel coordinate space '''
return self.mesh2d.meter2pixel(x, y)
def createMeasurement(self, x, y, z, ts, noise, sample_environment=0):
values = {}
for ap in self.aps.values():
v = ap.dbdata[self.translateToVoxel(x, y, z)]
if v <= -100:
continue # ignore ap
# add some samples from surrounding
if sample_environment > 0:
se = sample_environment
additional = [(x+se,y,z),(x-se,y,z),(x,y+se,z),(x,y-se,z),(x,y,z+se),(x,y,z-se)]
vs = []
for _x, _y, _z in additional:
_v = ap.dbdata[self.translateToVoxel(_x, _y, _z)]
vs.append(_v)
v = sum(vs + [v]) / (len(vs)+1)
#~ if v < -80:
#~ continue
# add noise with variance = noise
if noise > 0:
v += np.random.normal(scale=noise)
values[ap.name] = v
return Measurement(ts, values, Position(x, y, z))
def generateSyntheticMeasurementsAtPositions(self, positions, speed=0.6, noise=5, sample_environment=0):
measurements = Measurements()
curr_ts = 0
_x, _y, _z = self.cube2meter(*self.translateToVoxel(*positions[0], cube_width=1), cubewidth=1)
measurements.add(self.createMeasurement( _x, _y, _z, ts=curr_ts, noise=noise, sample_environment=sample_environment))
for (x1, y1, z1), (x2, y2, z2) in zip(positions, positions[1:]):
delta_v = (x2-x1, y2-y1, z2-z1) # delta vector
            distance = (delta_v[0]**2 + delta_v[1]**2 + delta_v[2]**2)**0.5 # euclidean distance
curr_ts += distance / speed
_x, _y, _z = self.cube2meter(*self.translateToVoxel(x2, y2, z2, cube_width=1), cubewidth=1)
measurements.add(self.createMeasurement(_x, _y, _z, curr_ts, noise, sample_environment))
return measurements
def generateSyntheticMeasurementsFromCorners(self, corners, speed=0.6, granularity=0.5, noise=5):
        ''' Generate a synthetic series of measurements along the given corners
        by simulating a walk with speed in m/s.
        Granularity (in m) is used to interpolate the points between two corners
        '''
measurements = Measurements()
curr_ts = 0 # initialize time with 0
for (x1, y1, z1), (x2, y2, z2) in zip(corners, corners[1:]):
_x, _y, _z = self.cube2meter(*self.translateToVoxel(x1, y1, z1, cube_width=1), cubewidth=1)
measurements.add(self.createMeasurement(_x, _y, _z, curr_ts, noise))
delta_v = (x2-x1, y2-y1, z2-z1) # delta vector
            distance = (delta_v[0]**2 + delta_v[1]**2 + delta_v[2]**2)**0.5 # euclidean distance
if distance > 0:
num_intermediate = int(distance / granularity)
for i in range(1, num_intermediate):
_x, _y, _z = (
x1 + (delta_v[0] * i / float(num_intermediate)),
y1 + (delta_v[1] * i / float(num_intermediate)),
z1 + (delta_v[2] * i / float(num_intermediate)))
curr_distance = i / float(num_intermediate) * distance
_curr_ts = curr_ts + curr_distance * speed**-1
_x, _y, _z = self.cube2meter(*self.translateToVoxel(_x, _y, _z, cube_width=1), cubewidth=1)
#~ _x, _y, _z = self.cube2meter(_x, _y, _z, cubewidth=1)
measurements.add(self.createMeasurement(_x, _y, _z, _curr_ts, noise))
curr_ts += distance * speed**-1
else:
# add 3 measurements if no movements
measurements.add(self.createMeasurement(x1, y1, z1, curr_ts + 1, noise))
measurements.add(self.createMeasurement(x1, y1, z1, curr_ts + 2, noise))
measurements.add(self.createMeasurement(x1, y1, z1, curr_ts + 3, noise))
curr_ts = curr_ts + 3
return measurements
def buildSimulations(self, scene_name, materials, ap2power, bbox, resolution,
density, numphotons, disabledObjects, baseUrl, onResult=None):
''' build AP simulations using a set of raytracer params over the server HTTP interface
this will refresh the current ap data
'''
oh = hashlib.sha1(self.objfile.bytes()).hexdigest()
def queue():
log.info('using %s materials' % len(materials.items()))
for matname, mat in materials.items():
log.info('%s: %s, %s' % (matname, mat.reflect, mat.alpha))
log.info('using objectfile %s [hash: %s]' % (self.objfile.abspath(), oh))
runids = {}
for ap in self.aps.values():
log.info('AP params: id: %s, power: %s x,y,z: %s, %s, %s' % (ap.name, ap2power[ap.name], ap.x, ap.y, ap.z))
outname = '%s_%s' % (scene_name, ap.name)
b = bbox
qparams = urllib.urlencode(dict(
scenename = scene_name,
objhash = oh,
materials = TreeToString(materialsAsXml(materials)),
ap = str({'x': ap.x, 'y': ap.y, 'z': ap.z, 'power': ap2power[ap.name], 'id': ap.name, 'powerid': 'all'}),
bbox = str({'x1': b.x1, 'y1': b.y1, 'z1': b.z1, 'x2': b.x2, 'y2': b.y2, 'z2': b.z2}),
resolution = str({'x': resolution.x, 'y': resolution.y, 'z': resolution.z}),
density = str(density),
numphotons = str(numphotons),
disabledObjects = ','.join(disabledObjects)
))
log.info('queueing %s... ' % ap.name)
info = opener.open(baseUrl + '/queueJob', data=qparams).read()
if 'unknown objfile' in info:
log.info('transmitting objfile... [%s]' % self.objfile.abspath())
# transmit new version of objfile
params = {'objfile': open(self.objfile), 'expectedhash': oh}
mpart_opener.open(baseUrl + '/uploadobjfile', data=params)
log.info('queueing %s ... ' % ap.name)
# queue again
info = opener.open(baseUrl + '/queueJob', data=qparams).read()
try:
infotree = StringToTree(info)
except Exception:
log.error('cannot parse:\n' + info)
else:
assert infotree.getroot().tag == 'success'
runid = infotree.getroot().attrib['id']
runids[runid] = ap
            log.debug('queueing successful [%s runs]' % len(runids))
return runids
def _retrieve(runid, apid):
url = baseUrl + '/getJobZipData?runid=%s' % runid
log.info('retrieving zipdata from %s...' % url)
try:
t = time.time()
res = opener.open(url).read()
log.info('got %.1f kb [%.1f kb/sec]' % (len(res) / 1024.0, (len(res) /1024.0) / (time.time() - t)))
except Exception:
log.exception('error during retrieving')
return
try:
datfile = path(self.vi_path % apid)
d = datfile.parent
if not d.exists():
d.mkdir()
tempf = d.joinpath('%s_%s.zip' % (apid, runid))
tempf.write_bytes(res)
try:
zf = ZipFile(tempf)
except Exception:
log.error('cannot open zipfile with contents:\n\n%s' % res[:1000])
raise
for n in zf.namelist():
log.debug('extracting %s' % n)
s = zf.read(n)
f = d.joinpath(n)
log.debug('storing %.1f kb to %s' % (len(s) / 1024.0, f.abspath()))
f.write_bytes(s)
assert datfile.exists(), '%s does not exist' % datfile.abspath()
except Exception:
log.exception('error during unzipping')
def _check(runids):
_getids = lambda s: {e for e in s.split(',') if e.strip()}
startts = time.time()
c = len(runids)
while len(runids) > 0:
_runids = ','.join(runids.keys())
try:
url = baseUrl + '/queryJobs?runids=%s' % _runids
res = opener.open(url).read()
tree = StringToTree(res)
if onResult is not None:
onResult(runids, tree)
finished = _getids(tree.getroot().attrib['finished'])
for runid in finished:
apid = runids[runid].name
log.info('run %s for ap %s is finished' % (runid, apid))
runids.pop(runid)
thread.start_new_thread(_retrieve, (runid, apid))
except Exception:
log.exception('error during querying jobs')
time.sleep(1.0)
            log.info('querying finished, %s sims %.1f sec per sim' % (c, (time.time() - startts) / c))
if self.vi_path is not None:
self.replaceAPs(self.vi_path)
runids = queue()
thread.start_new_thread(_check, (runids, ))
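# Hedged sketch of the voxel <-> meter mapping implemented by
# Environment.translateToVoxel / translateToMeter: a plain affine transform
# built from the Dimensions translation (tx, ty, tz) and scale (sx, sy, sz).
# The Dimensions values below are made up for illustration.
def _example_voxel_roundtrip():
    di = Dimensions(tx=-5.0, ty=-5.0, tz=0.0, sx=0.25, sy=0.25, sz=0.5)
    x, y, z = 1.5, 2.0, 1.0  # meters
    vx = int(round((x - di.tx) / di.sx))
    vy = int(round((y - di.ty) / di.sy))
    vz = int(round((z - di.tz) / di.sz))
    assert (vx, vy, vz) == (26, 28, 2)
    # mapping back recovers the (grid-aligned) meter coordinates
    assert (vx * di.sx + di.tx, vy * di.sy + di.ty, vz * di.sz + di.tz) == (x, y, z)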
Position = namedtuple('Position', 'x y z')
class Measurement(object):
''' holds rssi values for each AP at a given position '''
def __init__(self, timestamp, apdata, pos=(0,0,0), speed=None, interpolated=True):
self.pos = Position(*pos)
self.apdata = apdata # key AP().name, value rssi
self.timestamp = timestamp
self.speed = speed
        self.interpolated = interpolated # differentiates interpolated/spawned from loaded measurements
def __repr__(self):
return str(self)
def __str__(self):
s = ' '.join('%s:%.3f' % (k, v) for k, v in sorted(self.apdata.items()))
ts = ('%.1f' % self.timestamp) if isinstance(self.timestamp, (float, int)) else self.timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
pos = ('%.3f,%.3f,%.3f' % self.pos) if (self.pos.x, self.pos.y, self.pos.z) != (0,0,0) else 'n/a'
speed = ' speed:%.3f' % self.speed if self.speed is not None else ''
return 'ts:%s%s pos:%s %s' % (ts, speed, pos, s)
class Measurements(list):
''' a series of measurements'''
def __init__(self, measurements=None, fname=None):
if measurements is not None:
self[:] = measurements
elif fname is not None:
self.load(fname)
def add(self, m):
self.append(m)
def plot(self, fname=None, interpolate=False):
aspectRatio = 0.5
dpi = 60
width = 1000
figsize = width / float(dpi), width / float(dpi) * aspectRatio
fig = plt.figure(figsize=figsize)
ax = plt.axes([0.08, 0.07, 0.90, 0.91], xlabel='Timestamp', ylabel=r'RSS')
apids = set()
for m in self:
apids.update(m.apdata.keys())
        apids = list(apids)  #~ [:3]
for apid in apids:
xs = []
ys = []
for m in self:
if apid in m.apdata:
xs.append(m.timestamp)
ys.append(m.apdata[apid])
xs = pylab.date2num(xs)
ax.xaxis_date()
if interpolate:
                xsi = pylab.linspace(xs[0], xs[-1], 200)
ysi = pylab.stineman_interp(xsi, xs, ys, yp=None)
ax.plot(xsi, ysi)
else:
ax.plot(xs, ys)
            ax.scatter(xs, ys)
#~ for m in self:
#~ xs.append(m.timestamp)
#~ ys.append(m.apdata.values())
#~ plot(xs, self.asArray(self.apdata.keys()))
leg = plt.legend(apids)
ltext = leg.get_texts()
llines = leg.get_lines()
plt.setp(ltext, fontsize='x-small')
plt.setp(llines, linewidth=0.7)
leg.draw_frame(False)
if fname is None:
plt.show()
else:
fig.savefig(fname)
def save(self, fname):
with open(fname, 'w') as f:
f.write(str(self))
def loadFromString(self, s):
self[:] = []
lines = [l for l in s.strip().split('\n') if l]
for l in lines:
apdata = {}
ts = None
pos = Position(0, 0, 0)
speed = None
            for tok in l.split():
                if tok.startswith('ts:'):
                    if 'T' in tok:
                        if '.' in tok[3:]:
                            ts = datetime.datetime.strptime(tok[3:], '%Y-%m-%dT%H:%M:%S.%f')
                        else:
                            ts = datetime.datetime.strptime(tok[3:], '%Y-%m-%dT%H:%M:%S')
                    else:
                        ts = datetime.datetime.fromtimestamp(float(tok[3:]))
                elif tok.startswith('pos:'):
                    pos = Position(*[float(e) for e in tok[4:].split(',')])
                elif tok.startswith('speed:'):
                    speed = float(tok[6:])
                else:
                    name, value = tok.split(':')
                    #~ if float(value) > -80:
                    apdata[name] = float(value)
self.add(Measurement(ts, apdata, pos, speed=speed, interpolated=False))
    def load(self, fname):
        try:
            with open(fname, 'r') as f:
                self.loadFromString(f.read())
        except IOError:
            log.exception('cannot load %s' % path(fname).abspath())
def spawn(self, cube_distance, walking_speed=1.5, interpolate_positions=True):
        ''' spawn a number of new measurements between two original measurements '''
assert len(self) > 1, len(self)
newData = []
for m1, m2 in zip(self, self[1:]):
#~ assert m2.timestamp > m1.timestamp, '%s > %s' % (m2.timestamp, m1.timestamp)
if m2.speed is not None:
speed = m2.speed * 1.0
else:
speed = walking_speed * 1.5
duration = (m2.timestamp - m1.timestamp).total_seconds()
if interpolate_positions:
delta_x = m2.pos.x - m1.pos.x
delta_y = m2.pos.y - m1.pos.y
delta_z = m2.pos.z - m1.pos.z
max_distance_walked = duration * speed
min_states_needed = int(max_distance_walked / cube_distance)
if min_states_needed < 2:
newData.append(m1)
continue
interval = duration / min_states_needed
# get apids, that are in both measurements available
apids = set(m1.apdata.keys()).intersection(m2.apdata.keys())
newData.append(m1)
for i in range(1, min_states_needed):
seconds = int(interval * i)
microseconds = 10**6 * ((interval * i) - seconds)
ts = m1.timestamp + datetime.timedelta(seconds=seconds, microseconds=microseconds)
apdata = {}
for apid in apids:
apdata[apid] = m1.apdata[apid] + (m2.apdata[apid] - m1.apdata[apid]) * (float(i) / min_states_needed)
if interpolate_positions:
pos = Position(m1.pos.x + i * delta_x / min_states_needed,
m1.pos.y + i * delta_y / min_states_needed,
m1.pos.z + i * delta_z / min_states_needed)
else:
pos = (0, 0, 0)
#~ print pos
newData.append(Measurement(ts, apdata, pos=pos))
        # the last one always has to be added
newData.append(m2)
if len(newData) > len(self):
#~ log.info('SPAWNING: increased measurements from %s to %s' % (len(self), len(newData)))
return Measurements(newData)
else:
log.info('SPAWNING: not needed (%s < %s)' % (len(newData), len(self)))
return Measurements(self)
def __str__(self):
res = []
for m in self:
res.append(str(m))
return '\n'.join(res)
def __getitem__(self, val):
if isinstance(val, int):
return super(Measurements, self).__getitem__(val)
else: # slice
return Measurements(super(Measurements, self).__getitem__(val))
def asArray(self, apids, no_signal=1, unavailable=2):
available_aps = set()
for m in self:
available_aps.update(m.apdata.keys())
res = np.zeros((len(self), len(apids)), dtype=np.double) + no_signal
#~ lastApData = defaultdict(lambda: -100)
for i, m in enumerate(self):
for j, apid in enumerate(apids):
                if apid not in available_aps:
res[i, j] = unavailable
elif apid in m.apdata:
res[i, j] = m.apdata[apid]
# else no_signal
return res
def interpolateLocations(self, locations, timestamps):
''' interpolate positions of measurements by evaluating "checkpoints" given in locations'''
if len(timestamps) == 0 or len(self) == 0:
return
        assert not isinstance(self[0].timestamp, float), 'only works with datetime-based timestamps'
def _get(ts):
try:
return datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
return datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S')
if isinstance(timestamps[0], basestring):
timestamps = [_get(ts) for ts in timestamps]
for m in self:
if m.timestamp < timestamps[0]:
# everything before first checkpoint timestamp is assumed to be at this checkpoint
m.pos = Position(*locations[0])
elif m.timestamp > timestamps[-1]:
# everything after last checkpoint timestamp is assumed to be at this checkpoint
m.pos = Position(*locations[-1])
else:
for loc1, ts1, loc2, ts2 in zip(locations, timestamps, locations[1:], timestamps[1:]):
                    if ts1 <= m.timestamp <= ts2:
                        # the measurement falls between these two checkpoints
loc1_loc2_duration = (ts2 - ts1).total_seconds()
loc1_loc2_distance = ((loc2[0] - loc1[0])**2 + (loc2[1] - loc1[1])**2 + (loc2[2] - loc1[2])**2)**0.5
# difference of measurement time to last checkpoint time
delta_ts = (m.timestamp - ts1).total_seconds()
                        # relative distance walked between the two checkpoints
relative_distance = delta_ts / loc1_loc2_duration
x = loc1[0] + (loc2[0] - loc1[0]) * relative_distance
y = loc1[1] + (loc2[1] - loc1[1]) * relative_distance
z = loc1[2] + (loc2[2] - loc1[2]) * relative_distance
m.pos = Position(x, y, z)
m.speed = loc1_loc2_distance / loc1_loc2_duration
break
#~ print loc, ts
assert m.pos.x != 0
def interpolateFromLocationString(self, s, locid2location):
locids, timestamps = zip(*[l.strip().split() for l in s.split('\n') if l.strip()])
locs = [locid2location[int(locid[6:])] for locid in locids]
locations = [(l.x, l.y, l.z) for l in locs]
timestamps = [ts[3:] for ts in timestamps]
self.interpolateLocations(locations, timestamps)
def interpolateSignals(self):
        ''' interpolate missing ap values from neighboring measurements (via forward scan)'''
last_apdata = self[0].apdata
last_ts = self[0].timestamp
for i, m in enumerate(self[1:]):
for apid in last_apdata:
if apid in m.apdata:
continue
# forward scan
duration = delta = None
for j in range(i+1, len(self)):
if apid in self[j].apdata:
# found next signal
duration = (self[j].timestamp - last_ts).total_seconds()
delta = self[j].apdata[apid] - last_apdata[apid]
break
                # interpolate the signal from i+1 up to the found index j
                if duration is not None:
                    for k in range(i+1, j):
                        ratio = (self[k].timestamp - last_ts).total_seconds() / duration
                        self[k].apdata[apid] = last_apdata[apid] + delta * ratio
last_apdata = m.apdata
last_ts = m.timestamp
@staticmethod
def fromTrackedMeasurements(locid2location, basePath, deviceid, pathid, runid):
d = path(basePath).joinpath(deviceid, pathid)
measurements = Measurements()
measurements.load(d.joinpath('measurements_%s.txt' % runid))
#~ for m in measurements:
#~ print m.timestamp
if measurements[0].pos == (0, 0, 0):
locstring = d.joinpath('locations_%s.txt' % runid).text()
measurements.interpolateFromLocationString(locstring, locid2location)
else:
pass # assume positions are stored in measurements
return measurements
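# Hedged sketch of the plain-text format shared by Measurement.__str__ and
# Measurements.loadFromString: one measurement per line, made of a timestamp,
# an optional position/speed and "apid:rssi" tokens. Values are made up.
def _example_measurements_roundtrip():
    line = 'ts:2020-01-01T10:00:00.000 pos:1.000,2.000,3.000 ap1:-60.000 ap2:-72.500'
    ms = Measurements()
    ms.loadFromString(line)
    assert len(ms) == 1
    assert ms[0].pos == (1.0, 2.0, 3.0)
    assert ms[0].apdata == {'ap1': -60.0, 'ap2': -72.5}
    # serializing again reproduces the line exactly
    assert str(ms) == line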
def updateScalarField(env, gui, data, inverted=False, data_range=None):
gui.scalar_field.mlab_source.scalars = data
#~ _, _, z = env.translateToVoxel(0, 0, 1.5)
#~ gui.ipw_z.ipw.slice_index = z
slm = gui.ipw_z.module_manager.scalar_lut_manager
#~ print dir(slm)
if data_range is None:
_min, _max = data.min(), data.max()
print (_min, _max)
slm.data_range = (_min, _max)
else:
slm.data_range = data_range
#~ slm.data_range = (10, 35)
slm.reverse_lut = inverted
def calculateDistance(env, gui, _log, pos=2):
'''build distance heatmap in gui for a position in a measurement path'''
p = Measurements()
p.load(r'N:\lws\tmp\path_iconia_0.txt')
measurement = p[pos]
all_deltas = []
for name, rssi in measurement.apdata.items():
        if name not in ['107', '108', '109p2', 'gaia']:
print 'skipping %s' % name
continue
ap = env.aps[name]
print name, rssi
deltas = ap.dbdata - rssi
all_deltas.append(deltas)
if len(all_deltas) == 0:
print 'nothing measured'
return
print 'got all deltas'
all_squared_deltas = [np.square(deltas) for deltas in all_deltas]
print 'squared deltas'
distances = np.sqrt(np.add.reduce(np.array(all_squared_deltas)))
print 'got distances'
min_dist = distances.min()
max_dist = distances.max()
updateScalarField(env, gui, distances, inverted=True, data_range=(min_dist+1, min_dist+25))
return distances
def gauss_nd_in_logspace(means, sq_sigmas, values, reduced=True):
''' n-dimensional gaussian with independent components in logspace'''
s = 0
num_emitted = 0
for mu, sq_sigma, x in zip(means, sq_sigmas, values):
if x == -2:
continue
elif x == -1:
if mu > -100:
s = s + 5
num_emitted = num_emitted + 1
continue
#~ if sq_sigma > 5:
#~ sq_sigma = 5
#~ assert sq_sigma > 0, sq_sigma
if not reduced:
log_p = -1 * (-(x - mu)**2 / (2 * sq_sigma) - math.log(math.sqrt(2 * math.pi * sq_sigma)))
#~ log_p = -(x - mu)**2 / (2 * sq_sigma) - math.log(sq_sigma)
else:
# pooled variance
            log_p = abs(x - mu)
num_emitted += 1
s += log_p
#~ if num_emitted > 0:
#~ s = s / num_emitted * 10
return s
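# Hedged sketch of the reduced emission cost computed above: each emitted
# component adds the absolute deviation |x - mu|; -2 ("unavailable") is
# skipped and -1 ("no signal") adds a fixed penalty of 5 whenever the model
# expects a signal (mu > -100). Numbers are made up.
def _example_gauss_nd_in_logspace():
    means = [-60.0, -70.0, -50.0]
    sq_sigmas = [9.0, 9.0, 9.0]
    values = [-63.0, -2.0, -1.0]  # one real value, one unavailable, one missing
    assert gauss_nd_in_logspace(means, sq_sigmas, values) == 3.0 + 5.0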
def debuggable_viterbi_decode(localizer, path):
''' pure python version of HMM-decoder, only used for learning/debugging
'''
def _fmt(a):
return ','.join('%.1f' % e for e in a)
env = localizer.env
transition_probs = localizer.transition_probs
emission_probs = localizer.emission_probs
s_shape_x, s_shape_y, s_shape_z = localizer.cubed_data_shape
t_shape_x, t_shape_y, t_shape_z = transition_probs.shape[3:]
numstates = s_shape_x * s_shape_y * s_shape_z
assert numstates * len(path) < 100000, 'disable me, if you want to wait... [%sk states]' % (numstates * len(path) / 1000)
all_probs = np.zeros((len(path), s_shape_x, s_shape_y, s_shape_z)) - 10**6
back_track = np.zeros((len(path), s_shape_x, s_shape_y, s_shape_z, 3), dtype=np.int)
f = open('r:/out_%s.txt' % 0, 'w')
t = time.time()
    log.debug('calc initial prob for %sk states in col 0 [of %.2f million states]' % (numstates / 1000, numstates * len(path) / 10.0**6))
for sx in range(s_shape_x):
for sy in range(s_shape_y):
for sz in range(s_shape_z):
means, sq_sigmas, rss_values = emission_probs[sx, sy, sz, 0], emission_probs[sx, sy, sz, 1], path[0]
#~ print sx, sy, sz, sq_sigmas
p_x_s = gauss_nd_in_logspace(means, sq_sigmas, rss_values)
assert p_x_s > 0, '%s %s %s' % (means, sq_sigmas, rss_values)
#~ print sx, sy, sz, em_prob
in_m = '(%.1f, %.1f, %.1f)' % env.translateToMeter(sx * localizer.cubewidth, sy * localizer.cubewidth, sz * localizer.cubewidth)
f.write('%s %s| p(x|s) %.1f mus: %s sigmas: %s xs: %s \n' % ( (sx, sy, sz), in_m, p_x_s, _fmt(means), _fmt(sq_sigmas), _fmt(rss_values)))
all_probs[0, sx, sy, sz] = p_x_s
#~ return []
    # we have to locate the (previous) state referenced by the transition index
    # if the transition_shape is (5, 5, 3), then (tx=2, ty=2, tz=1) references the current state itself
offset_x = t_shape_x / 2
offset_y = t_shape_y / 2
offset_z = t_shape_z / 2
best = []
for sx in range(s_shape_x):
for sy in range(s_shape_y):
for sz in range(s_shape_z):
best.append((all_probs[0, sx, sy, sz], (sx, sy, sz)))
f.write('\n')
for p, coords in list(reversed(sorted(best)))[:20]:
in_m = '(%.1f, %.1f, %.1f)' % env.translateToMeter(coords[0] * localizer.cubewidth, coords[1] * localizer.cubewidth, coords[2] * localizer.cubewidth)
f.write('%s %s %.1f\n' % (coords, in_m, p))
# xs -> emitted vector
    for i, rss_values in enumerate(path[1:], 1):
f = open('r:/out_%s.txt' % i, 'w')
log.debug('calc probs col %s of %s [%.1fk states/sec]' % (i, len(path), (numstates * i / 1000) / (time.time() - t + 0.000001)))
for sx in range(s_shape_x):
for sy in range(s_shape_y):
for sz in range(s_shape_z):
transitions = transition_probs[sx, sy, sz]
means, sq_sigmas = emission_probs[sx, sy, sz, 0], emission_probs[sx, sy, sz, 1]
# calc emission prob for vector with rss values
p_x_s = gauss_nd_in_logspace(means, sq_sigmas, rss_values)
min_cost = 10**99
best_prev_state = (None, None, None)
for tx in range(t_shape_x):
prev_idx_x = sx + tx - offset_x
if not 0 <= prev_idx_x < s_shape_x:
continue
for ty in range(t_shape_y):
prev_idx_y = sy + ty - offset_y
if not 0 <= prev_idx_y < s_shape_y:
continue
for tz in range(t_shape_z):
prev_idx_z = sz + tz - offset_z
if not 0 <= prev_idx_z < s_shape_z:
continue
prev_cost = all_probs[i-1, prev_idx_x, prev_idx_y, prev_idx_z]
cost = prev_cost + transitions[tx, ty, tz]
if cost < min_cost:
min_cost = cost
best_prev_state = (prev_idx_x, prev_idx_y, prev_idx_z)
f.write('%s -> %s with p %.1f | p(x|s) %1f\n' % ( (sx, sy, sz), best_prev_state, min_cost, p_x_s ))
all_probs[i, sx, sy, sz] = min_cost + p_x_s
back_track[i, sx, sy, sz] = best_prev_state
best = []
for sx in range(s_shape_x):
for sy in range(s_shape_y):
for sz in range(s_shape_z):
best.append((all_probs[i, sx, sy, sz], (sx, sy, sz)))
f.write('\n')
for p, coords in list(reversed(sorted(best)))[:5]:
f.write('%s %.1f\n' % (coords, p))
print '----'
lastCol = i
print 'lastcol_idx: %s' % lastCol
print '----'
# find best path
min_cost = 10**6
best_last_state = (None, None, None)
for sx in range(s_shape_x):
for sy in range(s_shape_y):
for sz in range(s_shape_z):
cost = all_probs[lastCol, sx, sy, sz]
if cost < min_cost:
best_last_state = sx, sy, sz
min_cost = cost
print 'best last state: (%s, %s, %s)' % best_last_state
estimated_path = []
curr_state = best_last_state
curr_cost = min_cost
for j in range(lastCol, -1, -1):
estimated_path.append((curr_state, curr_cost))
#~ print curr_state
print j, curr_state[0], curr_state[1], curr_state[2]
curr_cost = all_probs[j, curr_state[0], curr_state[1], curr_state[2]]
        if j > 0:
            curr_state = back_track[j, curr_state[0], curr_state[1], curr_state[2]]
return list(reversed(estimated_path))
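# Hedged toy version of the min-cost recursion used by
# debuggable_viterbi_decode, reduced to a 1d state space with nearest-neighbor
# transitions: cost[i][s] = emission[i][s] + min_p(cost[i-1][p] + t(p, s)),
# followed by a backtrack from the cheapest final state. For example,
# _example_viterbi_1d([[0.0, 5.0], [5.0, 0.0]]) returns [0, 1].
def _example_viterbi_1d(emission_costs, transition_cost=1.0):
    num_obs, num_states = len(emission_costs), len(emission_costs[0])
    cost = [list(emission_costs[0])]
    back = []
    for i in range(1, num_obs):
        row, brow = [], []
        for s in range(num_states):
            candidates = [(cost[i-1][p] + transition_cost * abs(p - s), p)
                          for p in range(max(0, s-1), min(num_states, s+2))]
            best_cost, best_prev = min(candidates)
            row.append(best_cost + emission_costs[i][s])
            brow.append(best_prev)
        cost.append(row)
        back.append(brow)
    state = min(range(num_states), key=lambda s: cost[-1][s])
    states = [state]
    for brow in reversed(back):
        state = brow[state]
        states.append(state)
    return list(reversed(states))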
class Localizer(object):
def __init__(self, env, gui=None, num_threads=0, max_aps=None, verbose=True):
self.env = env
self.gui = gui
self.num_threads = num_threads # 0=autodetect
aps = [(apid, ap.dbdata) for apid, ap in self.env.aps.items() if ap.loaded]
self.apdata_list = [ap[1] for ap in aps]
self.apids = [ap[0] for ap in aps]
# autocalced
self.cubed_data_shape = (0, 0, 0)
self.uncubed_data_shape = (0, 0, 0)
self.blockedCubes = None
if self.gui:
gui.localizer = self
self.walking_speed = self.gui.synthetic_measurements_speed if self.gui else 1.5
self.max_aps = max_aps
self.verbose = verbose
        self.decoder = None # holds the cython decoder, also reused for sequential processing
def _setupStates(self):
vi_dimensions = self.env.aps.values()[0].vi.di
self.uncubed_data_shape = (vi_dimensions.x, vi_dimensions.y, vi_dimensions.z)
t = time.time()
#~ self.emission_probs = np.load('r:/eprobs.npy')
self.emission_probs = speed.buildStatesWithEmissionProbs(self.uncubed_data_shape, self.apdata_list, self.cubewidth, 1, pooled_variance=2)
#~ np.save('r:/eprobs.npy', self.emission_probs)
self.cubed_data_shape = self.emission_probs.shape[:3]
log.debug('build emission probs in %.3f sec' % (time.time() - t))
self.blockedCubes = blockedCubes = self._buildTriangleCubeIntersection()
speed.evalUnreachableAirCubes(blockedCubes, cube_height=self.env.di.sz * self.cubewidth, max_reachable_height=2.0)
def analyzeEstimatedPath(self, estimated_path, cube_values=None):
errors = []
data = np.zeros(self.cubed_data_shape)
for i, (x, y, z) in enumerate(estimated_path):
in_m = self.env.translateToMeter(x * self.cubewidth, y * self.cubewidth, z * self.cubewidth)
if self.verbose == 2:
log.debug('%s %s (%.1f, %.1f, %.1f)' % (i, (x, y, z), in_m[0], in_m[1], in_m[2])) #, measurement[i].pos
if i > 0:
x1, y1, z1 = estimated_path[i-1]
# will only work for 5,5,3 model
                if abs(x1 - x) == 2 or abs(y1 - y) == 2 or abs(z1 - z) == 2:
if cube_values is not None:
data[(x1 + x) / 2, (y1 + y) / 2, (z1 + z) / 2] = (cube_values[i-1] + cube_values[i]) / 2.0
else:
data[(x1 + x) / 2, (y1 + y) / 2, (z1 + z) / 2] = (len(estimated_path) * 5) + i * 10
#~ print i, cube_values[i]
if cube_values is not None:
if i < len(cube_values):
data[x, y, z] = cube_values[i]
else:
                    log.warn('index %s exceeds len(cube_values)' % i)
else:
data[x, y, z] = (len(estimated_path) * 5) + i * 10
#~ data[x, y, z] = i * 10
data = speed.uncube(data, self.uncubed_data_shape)
if self.gui:
gui = self.gui
def doStuff():
gui.scalar_field.mlab_source.scalars = data
gui.surface_number_of_contours = 10
gui.surface_view.visible = True
#~ updateScalarField(self.env, gui, data)
#~ _, _, z = self.env.translateToVoxel(0, 0, in_m[2])
#~ gui.ipw_z.ipw.slice_index = z
if gui.gui_thread_id != thread.get_ident():
GUI.invoke_later(doStuff)
else:
doStuff()
return
def viewHistory(self, idx, count=None, store=False):
        ''' view the top hypotheses at a signal index
        use count for slicing with [count:]
        TODO: move, since this is gui-specific'''
gui = self.gui
#~ data = np.zeros((gui.cube_res_x, gui.cube_res_y, gui.cube_res_z))
#~ a = self.decoder.history[idx][count:]
#~ min_p = np.min(a)
#~ for (x, y, z, p) in a:
#~ data[x, y, z] = p - min_p
#~ uncubeddata = speed.uncube(data, (gui.res_x, gui.res_y, gui.res_z))
if idx >= len(self.decoder.history):
log.warn('idx %s >= %s' % (idx, len(self.decoder.history)))
idx = len(self.decoder.history) - 1
elif idx < 0:
idx = 0
count = count if count is not None else 10000
count = min(count, len(self.decoder.history[0]))
log.info('view history %s with %s...' % (idx, count))
for i, (x, y, z, v) in enumerate(self.decoder.history[0][-10:]):
log.info('top: %s xyz: %d,%d,%d, value: %.2f' % (10-i, x, y, z, v))
uncubeddata = speed.viewPruningHistory(self.decoder.history, idx, self.uncubed_data_shape, self.cubewidth,
count, view_values=True, raw_values=False)
def doStuff():
#~ self.gui.scalar_field.mlab_source.scalars = uncubeddata
self.gui.scalar_field.mlab_source.set(scalars=uncubeddata)
if store:
print 'XXX', idx
self.gui.scene.mlab.savefig('r:/out_%03d.png' % idx)
log.info('view history done')
GUI.invoke_later(doStuff)
def _buildTriangleCubeIntersection(self):
#_isblocking = lambda n: any(s in n.lower() for s in {'concrete', 'wall', 'window', 'stairs', 'fascade'})
        _isblocking = lambda n: any(s in n.lower() for s in {'walls_green', 'walls_red', 'toilet', 'roof', 'piles', 'floor', 'stairs', 'fascade'})
        _isUnpassable = lambda n: any(s in n.lower() for s in {'cupboard', 'table', 'hardware', 'railing', 'elevator', 'boxes'})
        _isNotblocking = lambda n: any(s in n.lower() for s in {'glassdoor', 'irondoor', 'doors'})
blockingObjectNames = [n for n in self.env.mesh.objects.keys() if _isblocking(n)]
unblockingObjectNames = [n for n in self.env.mesh.objects.keys() if _isNotblocking(n)]
unpassableObjectNames = [n for n in self.env.mesh.objects.keys() if _isUnpassable(n)]
print "blocking materials:" , blockingObjectNames
CACHED = True
CACHE_FILE = self.env.tmpdir.joinpath('blocked_cubes_%s.npy' % self.cubewidth)
MIN_UNBLOCK_WIDTH = 3 # minimum cubewidth that triggers unblocking
if CACHED and CACHE_FILE.exists():# and self.env.objfile.mtime < CACHE_FILE.mtime:
try:
return np.load(CACHE_FILE)
except Exception:
log.exception('error during loading')
log.info('building blocked cubes... [%s]' % CACHE_FILE.abspath())
vi_dimensions = self.env.aps.values()[0].vi.di
#~ assert len(set(blockingObjectNames).intersection(unblockingObjectNames)) == 0
t = time.time()
#HACK for e1 model
ADD_BLOCKING = [((-32.,-11.,0.2),(-32.,14.,0.2),(32.,14.,0.2)),((32.,-11.,0.2),(32.,14.,0.2),(-32.,-11.,0.2))]
blts = list(ADD_BLOCKING)
for name in blockingObjectNames:
blts.extend(self.env.mesh.itertriangles(name))
# HACK for umic model
ADDITIONAL_UNBLOCKING = []
#= [((38.16, 9.8, 0.1), (38.16, 9.8, 1.8), (37.5, 9.8, 0.1)),
# ((37.0, 4.0, 0.1), (38.0, 3.9, 1.8), (38.0, 3.7, 0.1)),
# ]
unblts = list(ADDITIONAL_UNBLOCKING)
if self.cubewidth > MIN_UNBLOCK_WIDTH:
for name in unblockingObjectNames:
unblts.extend(self.env.mesh.itertriangles(name))
unpass = []
for name in unpassableObjectNames:
unpass.extend(self.env.mesh.itertriangles(name))
bl_triangles = np.zeros((len(blts), 3, 3))
unpass_triangles = np.zeros((len(unpass), 3, 3))
unbl_triangles = np.zeros((len(unblts), 3, 3))
all_ts = [(blts, bl_triangles),
(unpass, unpass_triangles),
(unblts, unbl_triangles)]
        for ts, np_ts in all_ts:
            for i, (p1, p2, p3) in enumerate(ts):
                np_ts[i, 0] = p1
                np_ts[i, 1] = p2
                np_ts[i, 2] = p3
blockedCubes = speed.buildTriangleCubeIntersection(bl_triangles, unpass_triangles, unbl_triangles, self.cubed_data_shape, self.cubewidth, vi_dimensions)
log.debug('evaluated blocked cubes in %.3f sec' % (time.time() - t))
np.save(CACHE_FILE, blockedCubes)
return blockedCubes
def analyzePathSequence(self, tmpdir, path_sequence, path_sequence_seq, measurements, errors_2d, seq_errors_2d, name, errors_3d=None, seq_errors_3d=None):
decoder = self.decoder
c2m = self.env.cube2meter
apids = self.apids
ma = measurements.asArray(apids)
eprobs = self.emission_probs
#~ tprobs = self.transition_probs
#~ ox, oy, oz = self.transition_shape / 2
x, y, z = path_sequence[0]
xs, ys, zs = path_sequence_seq[0]
means, sigmas = eprobs[x, y, z][0], eprobs[x, y, z][1]
        ep = 0  #~ decoder.getEmissionCost(eprobs[x, y, z, 0], ma[0])
#~ ep = gauss_nd_in_logspace(means, sigmas, ma[0], reduced=True)
        # root element of the per-position dump (the 'path' tag name is an assumption)
        tree = StringToTree('<path/>')
        #~ tree = StringToTree('<path tshape="%s,%s,%s"/>' % tuple(self.transition_shape))
pel = tree.getroot()
xm, ym, zm = c2m(x, y, z, self.cubewidth)
x_seq, y_seq, z_seq = c2m(xs, ys, zs, self.cubewidth)
interpolated = 'true' if measurements[0].interpolated else 'false'
attribs = {'ep': round(ep, 2), 'tp': 0, 'sx': x, 'sy': y, 'sz': z,
'x': round(xm, 2), 'y': round(ym, 2), 'z': round(zm, 2),
'xseq': round(x_seq, 2), 'yseq': round(y_seq, 2), 'zseq': round(z_seq, 2),
'dx': 0, 'dy': 0, 'dz': 0, 'interpolated': interpolated,
'err2d': round(errors_2d[0], 3), 'seqerr2d': round(seq_errors_2d[0], 3),
'err3d': round(errors_3d[0], 3), 'seqerr3d': round(seq_errors_3d[0], 3),
'ts': 0,
'truex':measurements[0].pos.x,'truey':measurements[0].pos.y,'truez':measurements[0].pos.z}
e = subelement(pel, 'pos', attribs=attribs)
        attribs = {'ap_%s' % apid: '%.1f/%.1f/%.1f' % (eprobs[x, y, z, 0, i], eprobs[xs, ys, zs, 0, i], ma[0][i]) for i, apid in enumerate(apids)}
subelement(e, 'apdata', attribs=attribs)
first_ts = measurements[0].timestamp
zipped = zip(path_sequence, path_sequence[1:], path_sequence_seq[1:], ma[1:], measurements[1:], errors_2d[1:], seq_errors_2d[1:], errors_3d[1:], seq_errors_3d[1:])
for (x1, y1, z1), (x2, y2, z2), (x3, y3, z3), values, m, err2d, seqerr2d, err3d, seqerr3d in zipped:
dx, dy, dz = x1-x2, y1-y2, z1-z2
#~ tx, ty, tz = ox + dx, oy + dy, oz + dz
            tp = 0  #~ tprobs[x2, y2, z2, tx, ty, tz]
            ep = 0  #~ decoder.getEmissionCost(eprobs[x2, y2, z2, 0], values)
#~ print round(ep, 1), round(tp), '%s,%s,%s' % (dx, dy, dz)
interpolated = 'true' if m.interpolated else 'false'
xm, ym, zm = c2m(x2, y2, z2, self.cubewidth)
x_seq, y_seq, z_seq = c2m(x3, y3, z3, self.cubewidth)
attribs = {'ep': round(ep, 2), 'tp': round(tp, 2), 'sx': x2, 'sy': y2, 'sz': z2,
'x': round(xm, 2), 'y': round(ym, 2), 'z': round(zm, 2),
'xseq': round(x_seq, 2), 'yseq': round(y_seq, 2), 'zseq': round(z_seq, 2),
'dx': dx, 'dy': dy, 'dz': dz, 'interpolated': interpolated,
'err2d': round(err2d, 3), 'seqerr2d': round(seqerr2d, 3),
'err3d': round(err3d, 3), 'seqerr3d': round(seqerr3d, 3),
'ts': '%.1f' % (m.timestamp - first_ts).total_seconds(),
'truex':m.pos.x,'truey':m.pos.y,'truez':m.pos.z}
e = subelement(pel, 'pos', attribs=attribs)
attribs = {'ap_%s' % apid: '%.1f/%.1f/%.1f' % (eprobs[x2, y2, z2, 0, i], eprobs[x3, y3, z3, 0, i], values[i]) for i, apid in enumerate(apids)}
subelement(e, 'apdata', attribs=attribs)
tree.getroot().attrib['err2d'] = '%.3f' % (sum(errors_2d) / len(errors_2d))
tree.getroot().attrib['seqerr2d'] = '%.3f' % (sum(seq_errors_2d) / len(seq_errors_2d))
TreeToFile(tree, tmpdir.joinpath('%s.xml' % name))
def params(self):
return 'undefined'
    def toRealWordPaths(self, measurements, *paths):
        ''' translate cube-indexed paths into real world meter paths; measurements
        without a ground truth position (pos == (0,0,0)) are skipped '''
        real_path = []
other_paths = [[] for i in range(len(paths))]
zipped = zip(*paths)
for j, (m, other) in enumerate(zip(measurements, zipped)):
if j <= 2:
print other
if m.pos == (0,0,0):
continue
real_path.append((m.pos.x, m.pos.y, m.pos.z))
for i, o in enumerate(other):
xx = self.env.cube2meter(o[0], o[1], o[2], self.cubewidth)
other_paths[i].append(xx)
return (real_path,) + tuple(other_paths)
class HMMLocalizer(Localizer):
no_signal_delta = 0
def __init__(self, env, transition_shape=(5, 5, 3), cubewidth=3,
prune_to=1000, freespace_scan=-1, **kwargs):
super(HMMLocalizer, self).__init__(env, **kwargs)
self.cubewidth = cubewidth
self.transition_shape = np.array(transition_shape)
self.prune_to = prune_to
self.freespace_scan = freespace_scan
self.emission_probs = None
self.transition_probs = None
self._setupStates()
self.decoder = None
self.last_measurement = None
def _setupStates(self):
vi_dimensions = self.env.aps.values()[0].vi.di
self.uncubed_data_shape = (vi_dimensions.x, vi_dimensions.y, vi_dimensions.z)
t = time.time()
self.emission_probs = speed.buildStatesWithEmissionProbs(self.uncubed_data_shape, self.apdata_list, self.cubewidth, 1, pooled_variance=2)
self.cubed_data_shape = self.emission_probs.shape[:3]
log.debug('build emission probs in %.3f sec' % (time.time() - t))
self.blockedCubes = blockedCubes = self._buildTriangleCubeIntersection()
speed.evalUnreachableAirCubes(blockedCubes, cube_height=self.env.di.sz * self.cubewidth, max_reachable_height=1.8)
#~ blockedCubes[:, :, :] = 0
t = time.time()
self.transition_probs = speed.buildTransitionProbs(self.cubed_data_shape,
self.transition_shape, blockedCubes, self.cubewidth,
freespace_scan=self.freespace_scan)
if isinstance(self.prune_to, float):
# it's a float ratio, relative to num states
self.prune_to = int(self.prune_to * (self.cubed_data_shape[0] * self.cubed_data_shape[1] * self.cubed_data_shape[2]))
#~ for x, y, z in zip(*np.where(blockedCubes > 0)):
#~ self.emission_probs[x, y, z, :, :] = 0.000001
log.debug('build transition probs in %.3f sec' % (time.time() - t))
def analyzePath(self, cubes, measurements, clf=True):
'''TODO: gui-specific
return list of emission + transition cost
'''
eprobs = self.emission_probs
tprobs = self.transition_probs
ox, oy, oz = self.transition_shape / 2
x, y, z = cubes[0]
log_p = self.decoder.getEmissionCost(eprobs[x, y, z, 0], measurements[0])
#~ means, sigmas = eprobs[x, y, z][0], eprobs[x, y, z][1]
#~ log_p = gauss_nd_in_logspace(means, sigmas, measurements[0], reduced=True)
def _fmt(a):
return ','.join('%.1f' % e for e in a)
plot_xs = range(len(cubes))
plot_ys = [0]
deltas = [0]
for (x1, y1, z1), (x2, y2, z2), values in zip(cubes, cubes[1:], measurements[1:]):
dx, dy, dz = x2-x1, y2-y1, z2-z1
#~ if abs(dx) == 2 or abs(dy) == 2 or abs(dz) == 2:
#~ print 'jump 2'
#~ elif dx == 0 and dy == 0 and dz == 0:
#~ print 'jump 0'
tx, ty, tz = ox + dx, oy + dy, oz + dz
# Hmm, i dont understand why I use x1 instead of x2 here...
tp = tprobs[x1, y1, z1, tx, ty, tz]
#~ means, sigmas = eprobs[x2, y2, z2][0], eprobs[x2, y2, z2][1]
ep = self.decoder.getEmissionCost(eprobs[x2, y2, z2, 0], values)
#~ ep = gauss_nd_in_logspace(means, sigmas, values, reduced=True)
log_p = log_p + tp + ep
deltas.append(tp + ep)
plot_ys.append(log_p)
#~ print '%s %s %s tp: %.1f ep: %.1f total: %.1f' % (x2, y2, z2, tp, ep, log_p)
if GUI is not None:
def doStuff():
if clf:
self.gui.plot.clf()
ax1 = self.gui.plot.add_subplot(111)
ax1.grid()
#~ ax1.set_xlim(min(plot_xs), max(plot_xs))
#~ ax1.set_ylim(min(plot_ys), max(plot_ys))
ax1.plot(plot_xs[:len(plot_ys)], plot_ys[:len(plot_xs)])
GUI.invoke_later(doStuff)
return deltas
    @staticmethod
    def averageEstimatedPaths(estimated_paths, SIZE=2900):
        ''' combine a number of paths into a new path
        * does not lead to interesting results
        '''
        summed = [None] * len(estimated_paths[0])
for i in range(SIZE):
estimated_path = estimated_paths[i]
for j, ((x, y, z), p) in enumerate(estimated_path):
if summed[j] is None:
summed[j] = [0, 0, 0]
summed[j][0] = summed[j][0] + x
summed[j][1] = summed[j][1] + y
summed[j][2] = summed[j][2] + z
for j, (x, y, z) in enumerate(summed):
summed[j][0] = int(x / SIZE)
summed[j][1] = int(y / SIZE)
summed[j][2] = int(z / SIZE)
estimated_path = [(e, 0) for e in summed]
return estimated_path
def localizeGui(self, start, end):
measurements = Measurements()
measurements.loadFromString(self.gui.synthetic_measurements)
measurements = Measurements(measurements[start: end])
t = time.time()
#~ print 'start viterbi decoding'
#~ estimated_path = debuggable_viterbi_decode(self, measurements)
res, measurements = self.evaluateMeasurements(measurements, interpolate=True)
estimated_path, seq_estimated_path, seq_avg_estimated_path = res['end'], res['seq'], res['seq_avg']
print estimated_path[-3:]
log.info('viterbi decode in %.3f sec' % (time.time() - t))
## the estimated
costs = self.analyzePath(estimated_path, measurements.asArray(self.apids))
real_path, est_path, seq_est_path, seq_avg_path = self.toRealWordPaths(
measurements, estimated_path, seq_estimated_path, seq_avg_estimated_path)
errors = errorsFromLists(real_path, est_path)
err2d = errorsFromLists(real_path, est_path, mode='2d')
assert len(errors) == len(real_path)
log.info('avg_err_2d: %.2fm avg_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
        #~ cube_values = errors
        cube_values = None
self.analyzeEstimatedPath(estimated_path, cube_values=cube_values)
errors = errorsFromLists(real_path, seq_est_path)
err2d = errorsFromLists(real_path, seq_est_path, mode='2d')
log.info('avg_seq_err_2d: %.2fm avg_seq_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
errors = errorsFromLists(real_path, seq_avg_path)
err2d = errorsFromLists(real_path, seq_avg_path, mode='2d')
log.info('avg_seq_avg_err_2d: %.2fm avg_seq_avg_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
## the real
#~ deltas2 = self.analyzePath(getCubesFromPath(self.gui, self.env), measurements_a, clf=False)
#~ estimated_path2 = getCubesFromPath(self.gui, self.env)
#~ self.analyzeEstimatedPath(estimated_path2, cube_values=deltas2, verbose=False)
def evaluateMeasurements(self, measurements, interpolate=False):
# TODO: share decoder with threadlocal?
if interpolate:
measurements.interpolateSignals()
num_non_spawned = len(measurements)
cd = self.cubewidth * self.env.di.sx
#~ num_non_spawned = len(measurements)
measurements = measurements.spawn(walking_speed=self.walking_speed, cube_distance=cd)
if self.verbose:
log.info('walk: %sm/s, cd: %.3fm, measurements: %s/%s' % (self.walking_speed, cd, len(measurements), num_non_spawned))
self.decoder = decoder = speed.ViterbiDecoder(self.blockedCubes, self.emission_probs, self.transition_probs,
store_pruning=True, verbose=self.verbose, no_signal_delta=self.no_signal_delta)
measurements_a = measurements.asArray(self.apids)
if self.max_aps is not None and self.max_aps > -1:
for m in measurements_a:
values = []
for i in range(len(self.apids)):
values.append((m[i], i))
                # ascending by strength: strongest APs first, special values (> 0) moved to the end
                top_value_idxs = [e[1] for e in sorted(values, key=lambda x: float('inf') if x[0] > 0 else -x[0])]
                top_value_idxs = set(top_value_idxs[:self.max_aps])
                for i in range(len(self.apids)):
                    if i not in top_value_idxs and m[i] < 0:
                        m[i] = 2
log.info('using %s of %s aps' % (self.max_aps, len(self.apids)))
estimated_path, seq_estimated_path, avg_seq_estimated_path = decoder.decode(measurements_a, self.prune_to, self.num_threads, full=False)
#~ self.analyzePath(estimated_path, measurements)
#~ for hist in decoder.history:
#~ seq_estimated_path = [tuple(hist[-1][:3]) for hist in decoder.history]
return {'end': estimated_path, 'seq': seq_estimated_path, 'seq_avg': avg_seq_estimated_path}, measurements
def reset(self):
self.decoder = None
self.last_measurement = None
def evaluateNext(self, measurements):
if self.decoder is None:
self.decoder = decoder = speed.ViterbiDecoder(self.blockedCubes, self.emission_probs, self.transition_probs,
store_pruning=True, verbose=self.verbose, no_signal_delta=self.no_signal_delta)
continueDecoding = False
else:
decoder = self.decoder
continueDecoding = True
if self.last_measurement is not None:
measurements.insert(0, self.last_measurement)
cd = self.cubewidth * self.env.di.sx
measurements = measurements.spawn(walking_speed=self.walking_speed, cube_distance=cd)
measurements = Measurements(measurements[1:])
self.last_measurement = measurements[-1]
measurements_a = measurements.asArray(self.apids)
res = decoder.decode(measurements_a, self.prune_to, self.num_threads, full=False, continueDecoding=continueDecoding)
return {'end': res[0], 'seq': res[1], 'seq_avg': res[2]}, measurements
def gauss_nd(means, sq_sigmas, values):
p = 1
for mu, sq_sigma, x in zip(means, sq_sigmas, values):
if x == 2:
continue
elif x == 1: # no measurement
continue
        # independent gaussian density: exp(-(x - mu)**2 / (2 * sigma**2)) / sqrt(2 * pi * sigma**2)
        p = p * math.exp(-(x - mu)**2 / (2 * sq_sigma)) / math.sqrt(2 * math.pi * sq_sigma)
return p
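# Hedged sketch of how gauss_nd weights a particle: sentinel values in the
# measurement vector (1 = no signal, 2 = unavailable) are skipped, every
# other component multiplies in an independent gaussian density.
def _example_gauss_nd():
    means, sq_sigmas = [-60.0, -70.0], [9.0, 9.0]
    at_mean = gauss_nd(means, sq_sigmas, [-60.0, 2.0])   # 2nd component skipped
    off_mean = gauss_nd(means, sq_sigmas, [-66.0, 2.0])
    assert at_mean > off_mean  # the weight decays with distance from the mean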
def debuggable_particle_filter(measurements, emission_probs, num_particles, cubed_data_shape, seed=1234):
seq_res = []
sigmas = [9] * len(measurements[0])
np.random.seed(seed)
    particles = np.dstack([np.random.randint(0, size, num_particles) for size in cubed_data_shape])[0]
weights = np.zeros((num_particles, ), dtype=np.double)
SIZE_POOL = 10000
sampling_pool = np.random.multivariate_normal([0,0,0], [[5,0,0],[0,5,0],[0,0,2]], SIZE_POOL)
sampling_pool_idx = 0
for j, xs in enumerate(measurements):
print 'm: %s of %s' % (j, len(measurements))
        w_max = 0
        p_best = particles[0]
        w_news = []
for p in particles:
if p[0] == -1:
continue
mus = emission_probs[p[0], p[1], p[2], 0]
#~ w_new = w * gauss_nd(mus, sigmas, xs)
w_new = gauss_nd(mus, sigmas, xs)
w_news.append(w_new)
if w_new > w_max:
w_max = w_new
p_best = p
seq_res.append(p_best)
# renormalize
sum_w_new = sum(w_news)
for i, w in enumerate(w_news):
weights[i] = w / sum_w_new
new_particles = np.zeros((num_particles, 3))
total_spawned = 0
for p, w in zip(particles, weights):
num_spawn_particles = int(round(num_particles * w, 0))
# prevent overflow, due to rounding
if total_spawned + num_spawn_particles > num_particles:
num_spawn_particles = num_particles - total_spawned
idx = total_spawned
num_spawned = 0
while num_spawned < num_spawn_particles:
dx, dy, dz = sampling_pool[sampling_pool_idx % SIZE_POOL]
sampling_pool_idx += 1
new_x = p[0] + int(round(dx, 0))
new_y = p[1] + int(round(dy, 0))
new_z = p[2] + int(round(dz, 0))
if (0 <= new_x < cubed_data_shape[0]
and 0 <= new_y < cubed_data_shape[1]
and 0 <= new_z < cubed_data_shape[2]):
new_particles[idx, 0] = new_x
new_particles[idx, 1] = new_y
new_particles[idx, 2] = new_z
idx += 1
num_spawned += 1
total_spawned += num_spawn_particles
        # mark unfilled slots in the new generation as invalid so they are skipped
        for i in range(total_spawned, num_particles):
            new_particles[i][0] = -1
particles = new_particles
print seq_res
return seq_res, seq_res
def viewPFTransitionsAtPos(gui, pflocalizer):
t = time.time()
sampling_pool = pflocalizer.buildSamplingPool()
pool_size = len(sampling_pool)
blockedCubes = pflocalizer.blockedCubes
px = gui.state_x
py = gui.state_y
pz = gui.state_z
s_shape_x, s_shape_y, s_shape_z = pflocalizer.cubed_data_shape
sample_histogram = np.zeros(pflocalizer.cubed_data_shape)
local_block_map = np.zeros(pflocalizer.cubed_data_shape)
lround = lambda x: int(round(x, 0))
UN_BLOCKED = 0
OVER_SAMPLE = 5
idx = 0
num_spawn_particles = 1000
num_spawned = 0
sampling_pool_idx = 0
for i in range(num_spawn_particles * OVER_SAMPLE): # limit retries
if num_spawned == num_spawn_particles:
break
curr_pool_idx = sampling_pool_idx % pool_size
dx = sampling_pool[curr_pool_idx, 0]
dy = sampling_pool[curr_pool_idx, 1]
dz = sampling_pool[curr_pool_idx, 2]
sampling_pool_idx += 1
dxi = lround(dx)
dyi = lround(dy)
dzi = lround(dz)
new_px = px + dxi
new_py = py + dyi
new_pz = pz + dzi
# check if we are still in the allowed space
if not (0 <= new_px < s_shape_x and 0 <= new_py < s_shape_y and 0 <= new_pz < s_shape_z):
continue # try again
if local_block_map[new_px, new_py, new_pz] == 1:
continue # try again
# check if destination is blocked
if blockedCubes[new_px, new_py, new_pz] != UN_BLOCKED:
continue # try again
# scan space between p and new_p for BLOCKED cubes
# and reject sample if True
        distance = math.sqrt(dxi * dxi + dyi * dyi + dzi * dzi) # euclidean
if distance == 0:
continue
step_x = dxi / distance
step_y = dyi / distance
step_z = dzi / distance
curr_x = px
curr_y = py
curr_z = pz
curr_x_i = int(curr_x); curr_y_i = int(curr_y); curr_z_i = int(curr_z)
blocked_path = 0
for ii in range(int(distance)):
#~ while not(curr_x_i == new_px or curr_y_i == new_py or curr_z_i == new_pz):
curr_x = curr_x + step_x
curr_y = curr_y + step_y
curr_z = curr_z + step_z
curr_x_i = int(curr_x); curr_y_i = int(curr_y); curr_z_i = int(curr_z)
#~ print curr_x, curr_y, curr_z, ':', dx, dy, dz, ':', px, py, pz
if 0 <= curr_x_i < s_shape_x and 0 <= curr_y_i < s_shape_y and 0 <= curr_z_i < s_shape_z:
if blockedCubes[curr_x_i, curr_y_i, curr_z_i] != UN_BLOCKED:
blocked_path = 1
local_block_map[curr_x_i, curr_y_i, curr_z_i] = 1
break
if blocked_path == 1:
            # mark all cubes after the blocked hit as blocked, too
for ii in range(ii, int(distance)):
curr_x = curr_x + step_x
curr_y = curr_y + step_y
curr_z = curr_z + step_z
curr_x_i = int(curr_x); curr_y_i = int(curr_y); curr_z_i = int(curr_z)
if 0 <= curr_x_i < s_shape_x and 0 <= curr_y_i < s_shape_y and 0 <= curr_z_i < s_shape_z:
local_block_map[curr_x_i, curr_y_i, curr_z_i] = 1
#~ print 'blocked..., resample'
continue # try again
log.info('got %s' % num_spawned)
# ok, we got a valid sample
#~ new_particles[idx, 0] = new_px
#~ new_particles[idx, 1] = new_py
#~ new_particles[idx, 2] = new_pz
sample_histogram[new_px, new_py, new_pz] += 1
idx += 1
num_spawned += 1
if i == num_spawn_particles * OVER_SAMPLE - 1:
log.info('OVER_SAMPLE limit reached')
log.info('needed %s samplings in %.3f secs' % (sampling_pool_idx, time.time() - t))
log.info('uncube...')
data = speed.uncube(sample_histogram, pflocalizer.uncubed_data_shape)
def doStuff():
log.info('visualize...')
gui.scalar_field.mlab_source.scalars = data
#~ gui.surface_number_of_contours = 10
#~ gui.surface_view.visible = True
GUI.invoke_later(doStuff)
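# Hedged standalone sketch of the straight-line blocking test used above:
# walk in unit-length steps from start towards end and report whether any
# visited cube is blocked (bounds checks omitted; blocked is assumed to be
# a 3d numpy array of 0/1 flags).
def _example_line_blocked(blocked, start, end):
    dx, dy, dz = end[0] - start[0], end[1] - start[1], end[2] - start[2]
    distance = math.sqrt(dx * dx + dy * dy + dz * dz)
    if distance == 0:
        return False
    step_x, step_y, step_z = dx / distance, dy / distance, dz / distance
    x, y, z = float(start[0]), float(start[1]), float(start[2])
    for _ in range(int(distance)):
        x, y, z = x + step_x, y + step_y, z + step_z
        if blocked[int(x), int(y), int(z)]:
            return True
    return False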
class PFLocalizer(Localizer):
def __init__(self, env, cubewidth, with_history=False, num_particles=20000, transition_sigmas=(4.0, 4.0, 2.0),
avg_over_particles=50, do_blocking=True, smooth=1.0, emission_sigma=5.0,
turnoff_blocking=1e3, replace_below=0, max_replace=100, **kwargs):
super(PFLocalizer, self).__init__(env, **kwargs)
self.num_particles = num_particles
self.cubewidth = cubewidth
self._setupStates()
self.with_history = with_history
self.transition_sigmas = transition_sigmas
self.do_blocking = do_blocking
self.smooth = smooth
self.emission_sigma = emission_sigma
self.avg_over_particles = avg_over_particles
self.turnoff_blocking = turnoff_blocking
self.replace_below = replace_below
self.max_replace = max_replace
def buildSamplingPool(self):
cube_distance = self.cubewidth * self.env.di.sx
# sigma in real space (meter)
sigma_x, sigma_y, sigma_z = self.transition_sigmas
        # adapt sigma to cube space
        # note: np.random.multivariate_normal takes a covariance matrix, so the
        # diagonal entries below are treated as variances, not standard deviations
        sigma_x_cube = sigma_x / cube_distance
        sigma_y_cube = sigma_y / cube_distance
        sigma_z_cube = sigma_z / cube_distance
return np.random.multivariate_normal([0,0,0], [[sigma_x_cube,0,0],[0,sigma_y_cube,0],[0,0,sigma_z_cube]], 20000)
def params(self):
vals = (self.cubewidth, self.num_particles, self.transition_sigmas, self.smooth,
self.do_blocking, self.emission_sigma, self.avg_over_particles,
self.turnoff_blocking, self.replace_below, self.max_replace)
return 'cubewidth:%s num_particles:%s transition_sigmas:%s smooth:%s do_blocking:%s emission_sigma:%s avg_over_particles:%s turnoff_blocking:%s replace_below:%s max_replace:%s' % vals
def localizeGui(self, start, end):
#~ return
measurements = Measurements()
#~ self.do_blocking = False
measurements.loadFromString(self.gui.synthetic_measurements)
measurements = Measurements(measurements[start: end])
res, measurements = self.evaluateMeasurements(measurements)
        estimated_path, seq_estimated_path = res['end'], res['seq']
        if res['seq_avg'] is not None:
            seq_avg_estimated_path = res['seq_avg']
        else:
            seq_avg_estimated_path = [(0, 0, 0)] * len(estimated_path)
print estimated_path[-3:]
real_path, est_path, seq_path, seq_avg_path = self.toRealWordPaths(
measurements, estimated_path, seq_estimated_path, seq_avg_estimated_path)
errors = errorsFromLists(real_path, est_path)
err2d = errorsFromLists(real_path, est_path, mode='2d')
assert len(errors) == len(real_path)
log.info('avg_err_2d: %.2fm avg_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
self.analyzeEstimatedPath(estimated_path, cube_values=None)
errors = errorsFromLists(real_path, seq_path)
err2d = errorsFromLists(real_path, seq_path, mode='2d')
log.info('avg_seq_err_2d: %.2fm avg_seq_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
if res['seq_avg'] is not None:
errors = errorsFromLists(real_path, seq_avg_path)
err2d = errorsFromLists(real_path, seq_avg_path, mode='2d')
log.info('avg_seq_avg_err_2d: %.2fm avg_seq_avg_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
def evaluateMeasurements(self, measurements, interpolate=False):
if interpolate:
measurements.interpolateSignals()
num_non_spawned = len(measurements)
#~ measurements = measurements.spawn(cube_distance=self.cubewidth * self.env.di.sx)
#~ print measurements
measurements_a = measurements.asArray(self.apids)
# fixed seed for reproducible sampling pools - this has to be moved
np.random.seed(1234)
sampling_pool = self.buildSamplingPool()
# NOTE: forces turnoff_blocking for every decode, overriding the constructor value
self.turnoff_blocking = True
if self.verbose:
log.info('measurements: %s/%s, num_particles: %s, sigmas: %s, blocking: %s' % (len(measurements_a), num_non_spawned, self.num_particles, self.transition_sigmas, not self.turnoff_blocking))
self.decoder = pf = speed.ParticleFilter(self.blockedCubes, self.emission_probs,
sampling_pool, verbose=self.verbose, with_history=self.with_history)
t = time.time()
(estimated_path,
seq_estimated_path,
seq_avg_estimated_path) = pf.decode(measurements_a,
self.num_particles,
do_blocking=self.do_blocking,
average_over_particles=self.avg_over_particles,
smooth=self.smooth,
replace_below=self.replace_below,
max_replace=self.max_replace,
sigma=self.emission_sigma,
turnoff_blocking=self.turnoff_blocking)
#~ for p1, p2 in zip(seq_estimated_path, seq_avg_estimated_path):
#~ print p1, ':', p2
#~ print seq_res
#~ assert seq_res[4][1] == 32
#~ print time.time() - t
#~ import os; os._exit(0)
#~ seq_res, seq_res = debuggable_particle_filter(measurements, self.emission_probs, self.num_particles, self.cubed_data_shape)
return {'end': estimated_path, 'seq': seq_estimated_path, 'seq_avg': seq_avg_estimated_path}, measurements
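# Hedged standalone sketch of how the transition-noise pool built in
# buildSamplingPool is meant to be consumed during particle propagation;
# cubewidth, voxel size, and the particle coordinates are illustrative
# assumptions, not values from the production pipeline.
def _demoTransitionStep(cubewidth=4, voxel_size=0.25, sigmas=(4.0, 4.0, 2.0)):
    cube_distance = cubewidth * voxel_size  # cube edge length in meters
    # square the per-axis sigmas: multivariate_normal expects a covariance
    cov = np.diag([(s / cube_distance) ** 2 for s in sigmas])
    pool = np.random.multivariate_normal([0.0, 0.0, 0.0], cov, 1000)
    particle = np.array([10.0, 10.0, 2.0])  # one particle in cube coordinates
    # a transition draws a random pool entry and adds it to the position
    return particle + pool[np.random.randint(len(pool))]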
class LMSELocalizer(Localizer):
def __init__(self, env, cubewidth, with_history=False, **kwargs):
super(LMSELocalizer, self).__init__(env, **kwargs)
self.with_history = with_history
self.cubewidth = cubewidth
self._setupStates()
def _setupStates(self):
vi_dimensions = self.env.aps.values()[0].vi.di
self.uncubed_data_shape = (vi_dimensions.x, vi_dimensions.y, vi_dimensions.z)
t = time.time()
#~ self.emission_probs = np.load('r:/eprobs.npy')
self.emission_probs = speed.buildStatesWithEmissionProbs(self.uncubed_data_shape, self.apdata_list, self.cubewidth, 1, pooled_variance=2)
#~ np.save('r:/eprobs.npy', self.emission_probs)
self.cubed_data_shape = self.emission_probs.shape[:3]
log.debug('build emission probs in %.3f sec' % (time.time() - t))
#~ self.blockedCubes = blockedCubes = self._buildTriangleCubeIntersection()
#~ speed.evalUnreachableAirCubes(blockedCubes, cube_height=self.env.di.sz * self.cubewidth, max_reachable_height=2.0)
def evaluateMeasurements(self, measurements, interpolate=False):
if interpolate:
measurements.interpolateSignals()
measurements_a = measurements.asArray(self.apids)
if self.verbose:
log.info('measurements: %s, interpolate: %s' % (len(measurements_a), interpolate))
self.decoder = lmse = speed.LMSE(self.emission_probs, with_history=self.with_history, verbose=self.verbose)
seq_estimated_path = lmse.decode(measurements_a)
return {'seq': seq_estimated_path}, measurements
def evaluateNext(self, measurements):
return self.evaluateMeasurements(measurements, interpolate=False)
def localizeGui(self, start, end):
measurements = Measurements()
measurements.loadFromString(self.gui.synthetic_measurements)
measurements = Measurements(measurements[start: end])
res, measurements = self.evaluateMeasurements(measurements)
seq_estimated_path = res['seq']
log.debug('last estimates: %s' % (seq_estimated_path[-3:],))
# LMSE yields no averaged path, so reuse the sequence estimate
seq_avg_estimated_path = seq_estimated_path
real_path, seq_path, seq_avg_path = self.toRealWordPaths(
measurements, seq_estimated_path, seq_avg_estimated_path)
errors = errorsFromLists(real_path, seq_path)
err2d = errorsFromLists(real_path, seq_path, mode='2d')
log.info('avg_seq_err_2d: %.2fm avg_seq_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
log.debug('seq_estimated_path: %s' % (seq_estimated_path,))
self.analyzeEstimatedPath(seq_estimated_path, cube_values=None)
#~ if res['seq_avg'] is not None:
#~ errors = errorsFromLists(real_path, seq_avg_path)
#~ err2d = errorsFromLists(real_path, seq_avg_path, mode='2d')
#~ log.info('avg_seq_avg_err_2d: %.2fm avg_seq_avg_err_3d: %.2fm' % (sum(err2d) / len(err2d), sum(errors) / len(errors)))
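# Hedged sketch of the least-mean-squared-error decoding that speed.LMSE is
# assumed to perform: per measurement vector, pick the cube whose expected
# signal strengths minimize the squared error. The array shapes are
# assumptions based on how emission_probs is used above.
def _demoLMSEDecode(emission_means, measurements_a):
    # emission_means: (nx, ny, nz, num_aps) expected dB values per cube
    # measurements_a: (num_steps, num_aps) observed dB values per time step
    nx, ny, nz = emission_means.shape[:3]
    flat = emission_means.reshape(nx * ny * nz, -1)
    path = []
    for m in measurements_a:
        errors = np.sum((flat - m) ** 2, axis=1)
        path.append(np.unravel_index(np.argmin(errors), (nx, ny, nz)))
    return path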
def localizeGui(gui, _log, algo, start, end):
try:
global log; log = _log
env = Environment(gui.objfile, aps=getAPsFromGui(gui), vi_path=gui.apdatafiles, tmpdir=gui.tmpdir)
if algo == 'hmm':
localizer = HMMLocalizer(env, gui=gui, cubewidth=gui.cube_width, prune_to=gui.prune_to,
num_threads=gui.threads, freespace_scan=gui.freespace_scan)
elif algo == 'pf':
localizer = PFLocalizer(env, gui=gui, cubewidth=gui.cube_width, with_history=True)
elif algo == 'lmse':
localizer = LMSELocalizer(env, gui=gui, cubewidth=gui.cube_width, with_history=True)
else:
raise ValueError('unknown algorithm: %s' % algo)
localizer.localizeGui(start, end)
#~ localizer = StreamingHMMLocalizer(env, gui=gui, cubewidth=gui.cube_width, prune_to=gui.prune_to, num_threads=gui.threads)
#~ localizer.test()
except Exception:
log.exception('error happened')
def viewBlockedCubes(gui, _log):
try:
global log; log = _log
env = Environment(gui.objfile, tmpdir=gui.tmpdir, aps=getAPsFromGui(gui), vi_path=gui.apdatafiles)
log.info('setup localizer...')
localizer = HMMLocalizer(env, gui=gui, cubewidth=gui.cube_width, prune_to=gui.prune_to, num_threads=gui.threads)
log.info('uncube...')
data = speed.uncube(localizer.blockedCubes.astype(np.double), localizer.uncubed_data_shape)
# aggregate all transition probs
#~ combined_transition_probs[:, :, :10] = 0
#~ combined_transition_probs[:, :, 11:] = 0
#~ data = speed.uncube(combined_transition_probs, self.uncubed_data_shape)
#~ print blockedCubes.shape
def doStuff():
log.info('visualize...')
gui.surface_view.visible = False
updateScalarField(env, gui, data)
GUI.invoke_later(doStuff)
except Exception:
log.exception('error happened')
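# Hedged sketch of what speed.uncube is assumed to do: blow a coarse cube
# grid back up to voxel resolution by repeating each cube value, then trim
# to the target shape. np.kron is a stand-in for the accelerated version.
def _demoUncube(cubed, cubewidth, uncubed_shape):
    block = np.ones((cubewidth, cubewidth, cubewidth))
    expanded = np.kron(cubed, block)  # repeat each cube value cubewidth^3 times
    sx, sy, sz = uncubed_shape
    return expanded[:sx, :sy, :sz]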
def getAPsFromGui(gui, assertLoadable=True):
if not hasattr(gui, '_aps'):
gui._aps = {}
#~ gui._aps.clear()
device = gui.device_name
optrun = gui.optrun_session
# cache APs on the gui - since APs cache their device-adaption values,
# we build one per (optrun, device, ap.id) combination
for ap in gui.aps:
if not ap.used:
gui._aps.pop((optrun, device, ap.id), None)
else:
if (optrun, device, ap.id) in gui._aps:
continue
_ap = AccessPoint(name=ap.id, x=ap.x, y=ap.y, z=ap.z, volumeImagePath=gui.apdatafiles, davalues=gui.davalues.get(device, []))
if assertLoadable:
assert _ap.vi is not None
gui._aps[(optrun, device, ap.id)] = _ap
return [ap for (o, d, apid), ap in gui._aps.items() if d == device and o == optrun]
def getCubesFromPath(gui, env):
cubes = []
for p1, p2 in zip(gui.localization_path, gui.localization_path[1:]):
dx, dy, dz = p2.x - p1.x, p2.y - p1.y, p2.z - p1.z
dist = (dx**2 + dy**2 + dz**2)**0.5
one_step = gui.cube_width * gui.res_dx * 0.5
num_steps = int(dist / one_step)
if num_steps == 0:
continue
x_step, y_step, z_step = dx / num_steps, dy / num_steps, dz / num_steps
for i in range(0, num_steps):
rx, ry, rz = p1.x + i * x_step, p1.y + i * y_step, p1.z + i * z_step
x, y, z = env.translateToVoxel(rx, ry, rz, gui.cube_width)
#~ print i, x, y, z, '->', x, y, z
if x < gui.cube_res_x and y < gui.cube_res_y and z < gui.cube_res_z:
if not cubes or cubes[-1] != (x, y, z):
cubes.append((x, y, z))
#~ print 123, len(cubes), env.di
return np.array(cubes)
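# Hedged miniature of the path-stepping logic above with plain 3-tuples and
# an identity voxel transform, handy for sanity-checking the deduplication;
# the step size is an illustrative assumption.
def _demoStepPath(points, step=0.5):
    cubes = []
    for (x1, y1, z1), (x2, y2, z2) in zip(points, points[1:]):
        dx, dy, dz = x2 - x1, y2 - y1, z2 - z1
        dist = (dx ** 2 + dy ** 2 + dz ** 2) ** 0.5
        num_steps = int(dist / step)
        if num_steps == 0:
            continue  # segment shorter than one step
        for i in range(num_steps):
            cube = (int(x1 + i * dx / num_steps),
                    int(y1 + i * dy / num_steps),
                    int(z1 + i * dz / num_steps))
            if not cubes or cubes[-1] != cube:  # drop consecutive duplicates
                cubes.append(cube)
    return np.array(cubes)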
def buildPathCubes(gui, env=None):
tx, ty, tz = gui.bbox.x1, gui.bbox.y1, gui.bbox.z1
dx, dy, dz = gui.res_dx, gui.res_dy, gui.res_dz
if env is None:
env = Environment(di=Dimensions(tx, ty, tz, dx, dy, dz))
data = np.zeros((gui.cube_res_x, gui.cube_res_y, gui.cube_res_z))
cubes = getCubesFromPath(gui, env)
#~ print '345', len(cubes)
# start numbering above zero so every path cube maps to a visible (nonzero) contour value
for i, (x, y, z) in enumerate(cubes, max(1, int(len(cubes) * 1.0 / gui.surface_number_of_contours))):
data[x, y, z] = i
uncubeddata = speed.uncube(data, (gui.res_x, gui.res_y, gui.res_z))
gui.scalar_field.mlab_source.scalars = uncubeddata
return data
def localizeByDistance(env):
coords = [(31.3, 11.0, 1.2),
(38.0, 11.0, 1.2),
(37.8, 5.5, 1.2),
(50.1, 5.5, 1.2),
(50.0, 11.0, 1.2),
(42.0, 12.0, 1.2)]
#~ env.extractWalkableArea()
with_walkable = False
NOISE = 3.0
#~ measurement = env.generateSyntheticMeasurementsFromCorners(coords, noise=NOISE)
#~ measurement.save('r:/thepath.txt')
measurement = Measurements()
measurement.load(r'N:\lws\tmp\path_iconia_0.txt')
#~ measurement.load('r:/thepath.txt')
#~ asd
#~ measurement.plot()
#~ env.replaceAPs(VI_PATH_EMPTY)
l = Localizer(env)
estimated_path = []
real_path = []
for i, m in enumerate(measurement):
x, y, z = l.localize(m, with_walkable=with_walkable)
estimated_path.append((x, y, z))
real_path.append(m.pos)
log.info('step %s: estimated position (%.2f, %.2f, %.2f)' % (i, x, y, z))
#~ print 'wa:', env.walkable_area[env.translateToVoxel(x, y, z)]
if len(estimated_path) > 1:
zs = [p[2] for p in estimated_path]
avg_z = sum(zs) / len(zs) + 0.5
cutfile = env.getCutFile(avg_z)
img = Image.open(cutfile)
img = img.convert('RGBA')
draw = ImageDraw.Draw(img)
for (x1, y1, z1), (x2, y2, z2) in zip(estimated_path, estimated_path[1:]):
m2dx1, m2dy1 = env.translateMeterToMesh2dPixel(x1, y1)
m2dx2, m2dy2 = env.translateMeterToMesh2dPixel(x2, y2)
draw.line((m2dx1, m2dy1, m2dx2, m2dy2), fill='#FF0000')
#~ for (x1, y1, z1), (x2, y2, z2) in zip(estimated_path, real_path):
#~ m2dx1, m2dy1 = env.translateMeterToMesh2dPixel(x1, y1)
#~ m2dx2, m2dy2 = env.translateMeterToMesh2dPixel(x2, y2)
#~ draw.line((m2dx1, m2dy1, m2dx2, m2dy2), fill='#FF0000')
#~ # draw.text((m2dx2-20, m2dy2), '%.4f' % wa_modifier)
#~ for (x1, y1, z1), (x2, y2, z2) in zip(real_path, real_path[1:]):
#~ m2dx1, m2dy1 = env.translateMeterToMesh2dPixel(x1, y1)
#~ m2dx2, m2dy2 = env.translateMeterToMesh2dPixel(x2, y2)
#~ draw.line((m2dx1, m2dy1, m2dx2, m2dy2), fill='#00FF00')
#~ for name, ap in env.aps.items():
#~ m2dx, m2dy = env.translateMeterToMesh2dPixel(ap.x, ap.y)
#~ draw.ellipse((m2dx-2, m2dy-2, m2dx+2, m2dy+2), fill='#0000FF')
#~ draw.text((m2dx-10, m2dy-15), name, fill='#0000FF')
used_aps = ','.join(env.aps.keys())
draw.text((10, 10), 'z: %.1f aps: %s, noise: %s dB' % (avg_z, used_aps, NOISE), fill='#CC1111')
#~ env.walkable_area[:,:,avg]
img.save('r:/out_%s_%s.png' % (NOISE, with_walkable))
errors = errorsFromLists(estimated_path, real_path)
open(r'r:\errors.txt', 'w').write('\n'.join(str(e) for e in errors))
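# Hedged helper for summarizing the error list written to errors.txt above;
# the percentile choices are assumptions, not part of the original output.
def _summarizeErrors(errors):
    errs = sorted(errors)
    n = len(errs)
    return {'mean': sum(errs) / n,
            'median': errs[n // 2],
            'p90': errs[min(n - 1, int(n * 0.9))]}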
def testPruningList():
t = time.time()
speed.testFastPruningInsertinsert(100000, 1800000)
print 'total:', time.time() - t
def viewSignalGradients(gui, _log):
global log; log = _log
aps = getAPsFromGui(gui)
uncubed_data_shape = (gui.resolution.x, gui.resolution.y, gui.resolution.z)
to_be_summed = []
for ap in aps:
if ap.name not in ['107']:
continue
CUT_OFF = -80
log.info('applying sobel on %s [cutoff: %s]' % (ap.name, CUT_OFF))
data = np.array(ap.dbdata)
sig_too_low = np.where(data < CUT_OFF)
data[sig_too_low] = CUT_OFF
to_be_summed.append(np.abs(ndimage.filters.sobel(data * -1)))
#~ ap_count = np.zeros(shape=ap.dbdata.shape, dtype=np.double)
#~ ap_count[np.where(ap.dbdata > -100)] = 1
#~ to_be_summed.append(ap_count)
log.info('reduce...')
data = np.add.reduce(np.array(to_be_summed))
log.info('clearing gradients in blocked cubes')
CUBE_WIDTH = 1
CACHE_FILE = gui.tmpdir.joinpath('blocked_cubes_%s.npy' % CUBE_WIDTH)
blocked_cubes = np.load(CACHE_FILE)
#~ blocked_cubes = speed.uncube(np.load(CACHE_FILE).astype(np.double), uncubed_data_shape)
#~ blocked_cubes = ndimage.maximum_filter(blocked_cubes, size=3)
#~ blocked_cubes = ndimage.maximum_filter1d(blocked_cubes, size=3, axis=0)
#~ blocked_cubes = ndimage.maximum_filter1d(blocked_cubes, size=3, axis=1)
blocked_cubes = ndimage.maximum_filter1d(blocked_cubes, size=7, axis=2)
blocked_idx = np.where(blocked_cubes > 0) # 0 == UN_BLOCKED
data[blocked_idx] = 0
#~ data = np.add.reduce(np.array(to_be_summed)) / len(to_be_summed)
if data.shape != uncubed_data_shape:
log.info('uncube...')
data = speed.uncube(data, uncubed_data_shape)
def doStuff():
log.info('visualize...')
gui.scalar_field.mlab_source.scalars = data
#~ gui.surface_number_of_contours = 10
#~ gui.surface_view.visible = True
GUI.invoke_later(doStuff)
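# Minimal sketch of the gradient step above on synthetic data, assuming only
# numpy and scipy.ndimage; the -80 dB clamp mirrors CUT_OFF, but the field
# and its shape are made up.
def _demoSignalGradient(shape=(8, 8, 4), cutoff=-80):
    data = np.random.uniform(-100, -40, size=shape)
    data[data < cutoff] = cutoff  # clamp weak signals before differentiating
    # negate so stronger signals produce positive slopes, as above
    return np.abs(ndimage.sobel(data * -1))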
def fullPruniningAnimation(localizer, step=1, count=1000, delay=0):
def _run():
time.sleep(delay)
for i in range(0, len(localizer.decoder.history), step):
localizer.viewHistory(i, count, store=True)
time.sleep(0.1)
daemonthread(target=_run)