initial commit
0
core/utils/__init__.py
Normal file
233
core/utils/augmentor.py
Normal file
@@ -0,0 +1,233 @@
import numpy as np
import random
import math
import cv2
from PIL import Image

import torch
import torchvision
import torch.nn.functional as F


class FlowAugmentor:
    def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5):
        self.crop_size = crop_size
        self.augcolor = torchvision.transforms.ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
            hue=0.5/3.14)

        self.asymmetric_color_aug_prob = 0.2
        self.spatial_aug_prob = 0.8
        self.eraser_aug_prob = 0.5

        self.min_scale = min_scale
        self.max_scale = max_scale
        self.max_stretch = 0.2
        self.stretch_prob = 0.8
        self.margin = 20

    def color_transform(self, img1, img2):
        if np.random.rand() < self.asymmetric_color_aug_prob:
            img1 = np.array(self.augcolor(Image.fromarray(img1)), dtype=np.uint8)
            img2 = np.array(self.augcolor(Image.fromarray(img2)), dtype=np.uint8)

        else:
            image_stack = np.concatenate([img1, img2], axis=0)
            image_stack = np.array(self.augcolor(Image.fromarray(image_stack)), dtype=np.uint8)
            img1, img2 = np.split(image_stack, 2, axis=0)

        return img1, img2

    def eraser_transform(self, img1, img2, bounds=[50, 100]):
        ht, wd = img1.shape[:2]
        if np.random.rand() < self.eraser_aug_prob:
            mean_color = np.mean(img2.reshape(-1, 3), axis=0)
            for _ in range(np.random.randint(1, 3)):
                x0 = np.random.randint(0, wd)
                y0 = np.random.randint(0, ht)
                dx = np.random.randint(bounds[0], bounds[1])
                dy = np.random.randint(bounds[0], bounds[1])
                img2[y0:y0+dy, x0:x0+dx, :] = mean_color

        return img1, img2

    def spatial_transform(self, img1, img2, flow):
        # randomly sample scale

        ht, wd = img1.shape[:2]
        min_scale = np.maximum(
            (self.crop_size[0] + 1) / float(ht),
            (self.crop_size[1] + 1) / float(wd))

        max_scale = self.max_scale
        min_scale = max(min_scale, self.min_scale)

        scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
        scale_x = scale
        scale_y = scale
        if np.random.rand() < self.stretch_prob:
            scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
            scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)

        scale_x = np.clip(scale_x, min_scale, None)
        scale_y = np.clip(scale_y, min_scale, None)

        if np.random.rand() < self.spatial_aug_prob:
            # rescale the images
            img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow = flow * [scale_x, scale_y]

        if np.random.rand() < 0.5:  # h-flip
            img1 = img1[:, ::-1]
            img2 = img2[:, ::-1]
            flow = flow[:, ::-1] * [-1.0, 1.0]

        if np.random.rand() < 0.1:  # v-flip
            img1 = img1[::-1, :]
            img2 = img2[::-1, :]
            flow = flow[::-1, :] * [1.0, -1.0]

        y0 = np.random.randint(-self.margin, img1.shape[0] - self.crop_size[0] + self.margin)
        x0 = np.random.randint(-self.margin, img1.shape[1] - self.crop_size[1] + self.margin)

        y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
        x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])

        img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]

        return img1, img2, flow

    def __call__(self, img1, img2, flow):
        img1, img2 = self.color_transform(img1, img2)
        img1, img2 = self.eraser_transform(img1, img2)
        img1, img2, flow = self.spatial_transform(img1, img2, flow)

        img1 = np.ascontiguousarray(img1)
        img2 = np.ascontiguousarray(img2)
        flow = np.ascontiguousarray(flow)

        return img1, img2, flow
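A minimal usage sketch for FlowAugmentor (not part of the commit; the image size, crop size, and random inputs below are illustrative assumptions):

import numpy as np

augmentor = FlowAugmentor(crop_size=(368, 768))
img1 = np.random.randint(0, 255, (436, 1024, 3), dtype=np.uint8)
img2 = np.random.randint(0, 255, (436, 1024, 3), dtype=np.uint8)
flow = np.random.randn(436, 1024, 2).astype(np.float32)

img1, img2, flow = augmentor(img1, img2, flow)
# Both images and the flow come back cropped to crop_size:
# (368, 768, 3), (368, 768, 3) and (368, 768, 2).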

class FlowAugmentorKITTI:
    def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5):
        self.crop_size = crop_size
        self.augcolor = torchvision.transforms.ColorJitter(
            brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)

        self.max_scale = max_scale
        self.min_scale = min_scale

        self.spatial_aug_prob = 0.8
        self.eraser_aug_prob = 0.5

    def color_transform(self, img1, img2):
        image_stack = np.concatenate([img1, img2], axis=0)
        image_stack = np.array(self.augcolor(Image.fromarray(image_stack)), dtype=np.uint8)
        img1, img2 = np.split(image_stack, 2, axis=0)
        return img1, img2

    def eraser_transform(self, img1, img2):
        ht, wd = img1.shape[:2]
        if np.random.rand() < self.eraser_aug_prob:
            mean_color = np.mean(img2.reshape(-1, 3), axis=0)
            for _ in range(np.random.randint(1, 3)):
                x0 = np.random.randint(0, wd)
                y0 = np.random.randint(0, ht)
                dx = np.random.randint(50, 100)
                dy = np.random.randint(50, 100)
                img2[y0:y0+dy, x0:x0+dx, :] = mean_color

        return img1, img2

    def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
        ht, wd = flow.shape[:2]
        coords = np.meshgrid(np.arange(wd), np.arange(ht))
        coords = np.stack(coords, axis=-1)

        coords = coords.reshape(-1, 2).astype(np.float32)
        flow = flow.reshape(-1, 2).astype(np.float32)
        valid = valid.reshape(-1).astype(np.float32)

        coords0 = coords[valid >= 1]
        flow0 = flow[valid >= 1]

        ht1 = int(round(ht * fy))
        wd1 = int(round(wd * fx))

        coords1 = coords0 * [fx, fy]
        flow1 = flow0 * [fx, fy]

        xx = np.round(coords1[:, 0]).astype(np.int32)
        yy = np.round(coords1[:, 1]).astype(np.int32)

        v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
        xx = xx[v]
        yy = yy[v]
        flow1 = flow1[v]

        flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
        valid_img = np.zeros([ht1, wd1], dtype=np.int32)

        flow_img[yy, xx] = flow1
        valid_img[yy, xx] = 1

        return flow_img, valid_img

    def spatial_transform(self, img1, img2, flow, valid):
        # randomly sample scale

        ht, wd = img1.shape[:2]
        min_scale = np.maximum(
            (self.crop_size[0] + 1) / float(ht),
            (self.crop_size[1] + 1) / float(wd))

        scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
        scale_x = np.clip(scale, min_scale, None)
        scale_y = np.clip(scale, min_scale, None)

        if np.random.rand() < self.spatial_aug_prob:
            # rescale the images
            img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)

        if np.random.rand() < 0.5:  # h-flip
            img1 = img1[:, ::-1]
            img2 = img2[:, ::-1]
            flow = flow[:, ::-1] * [-1.0, 1.0]
            valid = valid[:, ::-1]

        margin_y = 20
        margin_x = 50

        y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
        x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)

        y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
        x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])

        img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        return img1, img2, flow, valid

    def __call__(self, img1, img2, flow, valid):
        img1, img2 = self.color_transform(img1, img2)
        img1, img2 = self.eraser_transform(img1, img2)
        img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)

        img1 = np.ascontiguousarray(img1)
        img2 = np.ascontiguousarray(img2)
        flow = np.ascontiguousarray(flow)
        valid = np.ascontiguousarray(valid)

        return img1, img2, flow, valid
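A matching sketch for the KITTI variant (again illustrative, not from the commit): it carries a sparse validity mask alongside the flow, and resize_sparse_flow_map keeps the two consistent by scattering only the valid pixels onto the rescaled grid.

import numpy as np

augmentor = FlowAugmentorKITTI(crop_size=(288, 960))
img1 = np.random.randint(0, 255, (375, 1242, 3), dtype=np.uint8)
img2 = np.random.randint(0, 255, (375, 1242, 3), dtype=np.uint8)
flow = np.zeros((375, 1242, 2), dtype=np.float32)
valid = (np.random.rand(375, 1242) > 0.5).astype(np.float32)

img1, img2, flow, valid = augmentor(img1, img2, flow, valid)
# flow and valid stay aligned after resizing, flipping and cropping:
# valid > 0 marks the pixels that still carry a ground-truth flow value.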
275
core/utils/flow_viz.py
Normal file
@@ -0,0 +1,275 @@
# MIT License
#
# Copyright (c) 2018 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2018-08-03

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np


def make_colorwheel():
    '''
    Generates a color wheel for optical flow visualization as presented in:
        Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
        URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun
    '''

    RY = 15
    YG = 6
    GC = 4
    CB = 11
    BM = 13
    MR = 6

    ncols = RY + YG + GC + CB + BM + MR
    colorwheel = np.zeros((ncols, 3))
    col = 0

    # RY
    colorwheel[0:RY, 0] = 255
    colorwheel[0:RY, 1] = np.floor(255*np.arange(0, RY)/RY)
    col = col+RY
    # YG
    colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0, YG)/YG)
    colorwheel[col:col+YG, 1] = 255
    col = col+YG
    # GC
    colorwheel[col:col+GC, 1] = 255
    colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0, GC)/GC)
    col = col+GC
    # CB
    colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
    colorwheel[col:col+CB, 2] = 255
    col = col+CB
    # BM
    colorwheel[col:col+BM, 2] = 255
    colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0, BM)/BM)
    col = col+BM
    # MR
    colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
    colorwheel[col:col+MR, 0] = 255
    return colorwheel


def flow_compute_color(u, v, convert_to_bgr=False):
    '''
    Applies the flow color wheel to (possibly clipped) flow components u and v.
    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun
    :param u: np.ndarray, input horizontal flow
    :param v: np.ndarray, input vertical flow
    :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
    :return:
    '''

    flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)

    colorwheel = make_colorwheel()  # shape [55x3]
    ncols = colorwheel.shape[0]

    rad = np.sqrt(np.square(u) + np.square(v))
    a = np.arctan2(-v, -u)/np.pi

    fk = (a+1) / 2*(ncols-1) + 1
    k0 = np.floor(fk).astype(np.int32)
    k1 = k0 + 1
    k1[k1 == ncols] = 1
    f = fk - k0

    for i in range(colorwheel.shape[1]):

        tmp = colorwheel[:, i]
        col0 = tmp[k0] / 255.0
        col1 = tmp[k1] / 255.0
        col = (1-f)*col0 + f*col1

        idx = (rad <= 1)
        col[idx] = 1 - rad[idx] * (1-col[idx])
        col[~idx] = col[~idx] * 0.75  # out of range?

        # Note the 2-i => BGR instead of RGB
        ch_idx = 2-i if convert_to_bgr else i
        flow_image[:, :, ch_idx] = np.floor(255 * col)

    return flow_image


def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):
    '''
    Expects a two dimensional flow image of shape [H,W,2]
    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun
    :param flow_uv: np.ndarray of shape [H,W,2]
    :param clip_flow: float, maximum clipping value for flow
    :return:
    '''

    assert flow_uv.ndim == 3, 'input flow must have three dimensions'
    assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'

    if clip_flow is not None:
        flow_uv = np.clip(flow_uv, 0, clip_flow)

    u = flow_uv[:, :, 0]
    v = flow_uv[:, :, 1]

    rad = np.sqrt(np.square(u) + np.square(v))
    rad_max = np.max(rad)

    epsilon = 1e-5
    u = u / (rad_max + epsilon)
    v = v / (rad_max + epsilon)

    return flow_compute_color(u, v, convert_to_bgr)


UNKNOWN_FLOW_THRESH = 1e7
SMALLFLOW = 0.0
LARGEFLOW = 1e8

def make_color_wheel():
    """
    Generate the color wheel according to the Middlebury color code
    :return: Color wheel
    """
    RY = 15
    YG = 6
    GC = 4
    CB = 11
    BM = 13
    MR = 6

    ncols = RY + YG + GC + CB + BM + MR

    colorwheel = np.zeros([ncols, 3])

    col = 0

    # RY
    colorwheel[0:RY, 0] = 255
    colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
    col += RY

    # YG
    colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
    colorwheel[col:col+YG, 1] = 255
    col += YG

    # GC
    colorwheel[col:col+GC, 1] = 255
    colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
    col += GC

    # CB
    colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
    colorwheel[col:col+CB, 2] = 255
    col += CB

    # BM
    colorwheel[col:col+BM, 2] = 255
    colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
    col += BM

    # MR
    colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
    colorwheel[col:col+MR, 0] = 255

    return colorwheel


def compute_color(u, v):
    """
    compute optical flow color map
    :param u: optical flow horizontal map
    :param v: optical flow vertical map
    :return: optical flow in color code
    """
    [h, w] = u.shape
    img = np.zeros([h, w, 3])
    nanIdx = np.isnan(u) | np.isnan(v)
    u[nanIdx] = 0
    v[nanIdx] = 0

    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)

    rad = np.sqrt(u**2+v**2)

    a = np.arctan2(-v, -u) / np.pi

    fk = (a+1) / 2 * (ncols - 1) + 1

    k0 = np.floor(fk).astype(int)

    k1 = k0 + 1
    k1[k1 == ncols+1] = 1
    f = fk - k0

    for i in range(0, np.size(colorwheel, 1)):
        tmp = colorwheel[:, i]
        col0 = tmp[k0-1] / 255
        col1 = tmp[k1-1] / 255
        col = (1-f) * col0 + f * col1

        idx = rad <= 1
        col[idx] = 1-rad[idx]*(1-col[idx])
        notidx = np.logical_not(idx)

        col[notidx] *= 0.75
        img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))

    return img


# from https://github.com/gengshan-y/VCN
def flow_to_image(flow):
    """
    Convert flow into middlebury color code image
    :param flow: optical flow map
    :return: optical flow image in middlebury color
    """
    u = flow[:, :, 0]
    v = flow[:, :, 1]

    maxu = -999.
    maxv = -999.
    minu = 999.
    minv = 999.

    idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
    u[idxUnknow] = 0
    v[idxUnknow] = 0

    maxu = max(maxu, np.max(u))
    minu = min(minu, np.min(u))

    maxv = max(maxv, np.max(v))
    minv = min(minv, np.min(v))

    rad = np.sqrt(u ** 2 + v ** 2)
    maxrad = max(-1, np.max(rad))

    u = u/(maxrad + np.finfo(float).eps)
    v = v/(maxrad + np.finfo(float).eps)

    img = compute_color(u, v)

    idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
    img[idx] = 0

    return np.uint8(img)
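A usage sketch for the visualization helpers (illustrative only; the flow values and file name are examples): flow_to_color expects an [H,W,2] array and returns an HxWx3 uint8 image, and convert_to_bgr=True produces the channel order cv2.imwrite expects.

import numpy as np
import cv2

flow = 5.0 * np.random.randn(240, 320, 2).astype(np.float32)
rgb = flow_to_color(flow)                       # HxWx3 uint8, RGB order
bgr = flow_to_color(flow, convert_to_bgr=True)  # same colors, BGR order
cv2.imwrite('flow_vis.png', bgr)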
124
core/utils/frame_utils.py
Normal file
@@ -0,0 +1,124 @@
import numpy as np
from PIL import Image
from os.path import *
import re
import cv2

TAG_CHAR = np.array([202021.25], np.float32)

def readFlow(fn):
    """ Read .flo file in Middlebury format """
    # Code adapted from:
    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy

    # WARNING: this will work on little-endian architectures (eg Intel x86) only!
    # print 'fn = %s'%(fn)
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print('Magic number incorrect. Invalid .flo file')
            return None
        else:
            w = np.fromfile(f, np.int32, count=1)
            h = np.fromfile(f, np.int32, count=1)
            # print 'Reading %d x %d flo file\n' % (w, h)
            data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
            # Reshape data into 3D array (columns, rows, bands)
            # The reshape here is for visualization, the original code is (w,h,2)
            return np.resize(data, (int(h), int(w), 2))

def readPFM(file):
    file = open(file, 'rb')

    color = None
    width = None
    height = None
    scale = None
    endian = None

    header = file.readline().rstrip()
    if header == b'PF':
        color = True
    elif header == b'Pf':
        color = False
    else:
        raise Exception('Not a PFM file.')

    dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
    if dim_match:
        width, height = map(int, dim_match.groups())
    else:
        raise Exception('Malformed PFM header.')

    scale = float(file.readline().rstrip())
    if scale < 0:  # little-endian
        endian = '<'
        scale = -scale
    else:
        endian = '>'  # big-endian

    data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)

    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data

def writeFlow(filename, uv, v=None):
    """ Write optical flow to file.

    If v is None, uv is assumed to contain both u and v channels,
    stacked in depth.
    Original code by Deqing Sun, adapted from Daniel Scharstein.
    """
    nBands = 2

    if v is None:
        assert(uv.ndim == 3)
        assert(uv.shape[2] == 2)
        u = uv[:, :, 0]
        v = uv[:, :, 1]
    else:
        u = uv

    assert(u.shape == v.shape)
    height, width = u.shape
    f = open(filename, 'wb')
    # write the header
    f.write(TAG_CHAR)
    np.array(width).astype(np.int32).tofile(f)
    np.array(height).astype(np.int32).tofile(f)
    # arrange into matrix form
    tmp = np.zeros((height, width*nBands))
    tmp[:, np.arange(width)*2] = u
    tmp[:, np.arange(width)*2 + 1] = v
    tmp.astype(np.float32).tofile(f)
    f.close()


def readFlowKITTI(filename):
    flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
    flow = flow[:, :, ::-1].astype(np.float32)
    flow, valid = flow[:, :, :2], flow[:, :, 2]
    flow = (flow - 2**15) / 64.0
    return flow, valid

def writeFlowKITTI(filename, uv):
    uv = 64.0 * uv + 2**15
    valid = np.ones([uv.shape[0], uv.shape[1], 1])
    uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
    cv2.imwrite(filename, uv[..., ::-1])


def read_gen(file_name, pil=False):
    ext = splitext(file_name)[-1]
    if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
        return Image.open(file_name)
    elif ext == '.bin' or ext == '.raw':
        return np.load(file_name)
    elif ext == '.flo':
        return readFlow(file_name).astype(np.float32)
    elif ext == '.pfm':
        flow = readPFM(file_name).astype(np.float32)
        return flow[:, :, :-1]
    return []
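A round-trip sketch for the I/O helpers (illustrative; the paths are examples): writeFlow stores a float32 [H,W,2] field in the Middlebury .flo layout and readFlow recovers it exactly, while the KITTI pair encodes flow as 16-bit PNGs via 64*flow + 2**15 plus a validity channel, so values are quantized to 1/64 pixel.

import numpy as np

flow = np.random.randn(120, 160, 2).astype(np.float32)
writeFlow('example.flo', flow)
flow2 = readFlow('example.flo')
assert flow2.shape == (120, 160, 2)
assert np.allclose(flow, flow2)

writeFlowKITTI('example_kitti.png', flow)
flow3, valid = readFlowKITTI('example_kitti.png')  # quantized to 1/64 pixel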
62
core/utils/utils.py
Normal file
@@ -0,0 +1,62 @@
import torch
import torch.nn.functional as F
import numpy as np
from scipy import interpolate


def bilinear_sampler(img, coords, mode='bilinear', mask=False):
    """ Wrapper for grid_sample, uses pixel coordinates """
    H, W = img.shape[-2:]
    xgrid, ygrid = coords.split([1, 1], dim=-1)
    xgrid = 2*xgrid/(W-1) - 1
    ygrid = 2*ygrid/(H-1) - 1

    grid = torch.cat([xgrid, ygrid], dim=-1)
    img = F.grid_sample(img, grid, align_corners=True)

    if mask:
        mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
        return img, mask.float()

    return img

def forward_interpolate(flow):
    flow = flow.detach().cpu().numpy()
    dx, dy = flow[0], flow[1]

    ht, wd = dx.shape
    x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))

    x1 = x0 + dx
    y1 = y0 + dy

    x1 = x1.reshape(-1)
    y1 = y1.reshape(-1)
    dx = dx.reshape(-1)
    dy = dy.reshape(-1)

    valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
    x1 = x1[valid]
    y1 = y1[valid]
    dx = dx[valid]
    dy = dy[valid]

    flow_x = interpolate.griddata(
        (x1, y1), dx, (x0, y0), method='nearest')

    flow_y = interpolate.griddata(
        (x1, y1), dy, (x0, y0), method='nearest')

    flow = np.stack([flow_x, flow_y], axis=0)
    return torch.from_numpy(flow).float()


def coords_grid(batch, ht, wd):
    coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))
    coords = torch.stack(coords[::-1], dim=0).float()
    return coords[None].repeat(batch, 1, 1, 1)


def upflow8(flow, mode='bilinear'):
    new_size = (8 * flow.shape[2], 8 * flow.shape[3])
    return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
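A small sketch of how coords_grid and bilinear_sampler fit together (illustrative, not from the commit): sampling an image at the identity pixel grid returns the image itself, which is the degenerate case of the coordinate-based lookup these helpers exist for; upflow8 upsamples a flow field by 8 in each dimension and scales its values accordingly.

import torch

img = torch.rand(2, 3, 32, 48)                   # (batch, channels, H, W)
coords = coords_grid(2, 32, 48)                  # (batch, 2, H, W), pixel coords (x, y)
out = bilinear_sampler(img, coords.permute(0, 2, 3, 1))
assert torch.allclose(out, img, atol=1e-5)

up = upflow8(torch.zeros(2, 2, 32, 48))          # (2, 2, 256, 384), values scaled by 8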