I have a 1D tensor that I wish to partition into overlapping blocks. I'm thinking of something like:
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7])
For example, with a block size of 3 and a stride of 2, I would expect the result [[1, 2, 3], [3, 4, 5], [5, 6, 7]].
Here is a relatively straightforward approach using your example:
import tensorflow as tf

def overlapping_blocker(tensor, block_size, stride):
    blocks = []
    n = tensor.get_shape().as_list()[0]
    # Start and (exclusive) end indices of the blocks; zip truncates to the
    # shorter range, so only complete blocks are kept.
    ilo = range(0, n, stride)
    ihi = range(block_size, n + 1, stride)
    for lo, hi in zip(ilo, ihi):
        blocks.append(tensor[lo:hi])
    # tf.pack was renamed to tf.stack in TensorFlow 1.0.
    return tf.stack(blocks, 0)

with tf.Session() as sess:
    tensor = tf.constant([1., 2., 3., 4., 5., 6., 7.])
    block_tensor = overlapping_blocker(tensor, 3, 2)
    print(sess.run(block_tensor))
Output:
[[ 1. 2. 3.]
[ 3. 4. 5.]
[ 5. 6. 7.]]
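(Side note, not part of the original answer: if you just want to sanity-check the expected blocks outside TensorFlow, NumPy 1.20+ can build the same overlapping windows.)
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.array([1., 2., 3., 4., 5., 6., 7.])
# All length-3 windows, then keep every 2nd one to apply the stride.
blocks = sliding_window_view(x, 3)[::2]
print(blocks)
# [[1. 2. 3.]
#  [3. 4. 5.]
#  [5. 6. 7.]]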
I am not sure whether this question has been sufficiently answered, but you can use a Python generator function to create overlapping windows:
import numpy as np

def gen_batch():
    # Compute the number of complete windows to emit.
    num_of_batches = (len(sequence) - batch_size) // stride + 1
    # Emit the windows one at a time.
    for i in range(0, num_of_batches * stride, stride):
        result = np.array(sequence[i:i + batch_size])
        yield result

sequence = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
batch_size = 3
stride = 2

gen = gen_batch()
print(next(gen))
[1 2 3]
print(next(gen))
[3 4 5]
...
print(next(gen))
[7 8 9]
Once you have defined your generator function, you can use TensorFlow's Dataset class to iterate over the slices:
ds = tf.data.Dataset.from_generator(
    gen_batch,
    tf.int64,                       # the generator yields integer arrays
    tf.TensorShape([batch_size]))   # each element is one window of length batch_size
ds_out = ds.make_one_shot_iterator().get_next()

sess = tf.Session()
print(sess.run(ds_out))
[1 2 3]
print(sess.run(ds_out))
[3 4 5]
...
print(sess.run(ds_out))
[7 8 9]
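As an aside (my addition, not from the original answer), newer TensorFlow versions can build the same overlapping windows directly with the Dataset API, without a hand-written generator; a minimal sketch assuming TF 2.x eager execution:
import tensorflow as tf

sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9]
batch_size, stride = 3, 2

ds = tf.data.Dataset.from_tensor_slices(sequence)
# window(size, shift) yields sub-datasets; batch each one back into a tensor
# and drop any incomplete window at the end.
ds = ds.window(batch_size, shift=stride, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(batch_size))

for window in ds:
    print(window.numpy())
# [1 2 3]
# [3 4 5]
# [5 6 7]
# [7 8 9]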
I think the function
tf.signal.overlap_and_add(
    signal, frame_step, name=None
)
might be what you want. It solved my problem. Note, though, that overlap_and_add reconstructs a signal from overlapping frames; the operation the question asks for (splitting a signal into overlapping frames) is its counterpart, tf.signal.frame(signal, frame_length, frame_step).
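A minimal sketch of the framing direction with tf.signal.frame (my addition; assumes TensorFlow 1.14 or later, where tf.signal is available):
import tensorflow as tf

tensor = tf.constant([1., 2., 3., 4., 5., 6., 7.])
# frame_length is the block size, frame_step is the stride.
frames = tf.signal.frame(tensor, frame_length=3, frame_step=2)

with tf.Session() as sess:   # in TF 2.x, just print(frames.numpy())
    print(sess.run(frames))
# [[1. 2. 3.]
#  [3. 4. 5.]
#  [5. 6. 7.]]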
If you're looking for a way to get each rolling window as an individual tensor (i.e. each time you call window.eval(), the window moves over by one), you can use tf.FIFOQueue together with tf.train.range_input_producer to build a queue that does this:
EDIT: updated to work with variable-length tensors, as requested in your original question
def window_input_producer(tensor, window_size, capacity=32, num_epochs=None):
    # Number of complete windows that fit in the tensor.
    num_windows = tf.shape(tensor)[0] - window_size + 1
    # Produces the window start indices 0, 1, 2, ... in order.
    range_queue = tf.train.range_input_producer(
        num_windows,
        shuffle=False,
        capacity=capacity,
        num_epochs=num_epochs
    )
    index = range_queue.dequeue()
    window = tensor[index:index + window_size]

    queue = tf.FIFOQueue(capacity=capacity,
                         dtypes=[tensor.dtype.base_dtype],
                         shapes=[window_size])
    enq = queue.enqueue(window)
    tf.train.add_queue_runner(
        tf.train.QueueRunner(queue, [enq])
    )
    return queue.dequeue()
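A usage sketch (my addition, not part of the original answer): the returned op only yields windows once the queue runners have been started.
import tensorflow as tf

tensor = tf.constant([1., 2., 3., 4., 5., 6., 7.])
window = window_input_producer(tensor, window_size=3)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for _ in range(5):
        # Windows advance one element per call: [1. 2. 3.], [2. 3. 4.], ...
        print(sess.run(window))
    coord.request_stop()
    coord.join(threads)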
You can use tf.nn.conv2d to help. Basically, you take a sliding filter of block_size over the input, stepping by stride. To make all the matrix indexes line up, you have to do some reshaping.
import tensorflow as tf

def overlap(tensor, block_size=3, stride=2):
    reshaped = tf.reshape(tensor, [1, 1, -1, 1])

    # Construct a diagonal identity matrix for the conv2d filters.
    ones = tf.ones(block_size, dtype=tf.float32)
    ident = tf.diag(ones)
    filter_dim = [1, block_size, block_size, 1]
    filter_matrix = tf.reshape(ident, filter_dim)

    stride_window = [1, 1, stride, 1]

    # Save the output tensors of the convolutions.
    filtered_conv = []
    for f in tf.unstack(filter_matrix, axis=1):
        reshaped_filter = tf.reshape(f, [1, block_size, 1, 1])
        c = tf.nn.conv2d(reshaped, reshaped_filter, stride_window, padding='VALID')
        filtered_conv.append(c)

    # Put the convolutions into a tensor and squeeze to get rid of extra dimensions.
    t = tf.stack(filtered_conv, axis=3)
    return tf.squeeze(t)

# Calculate the overlapping strided slice for the input tensor.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7], dtype=tf.float32)
overlap_tensor = overlap(tensor, block_size=3, stride=2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    in_t, overlap_t = sess.run([tensor, overlap_tensor])
    print('input tensor:')
    print(in_t)
    print('overlapping strided slice:')
    print(overlap_t)
Should give you the output:
input tensor:
[ 1. 2. 3. 4. 5. 6. 7.]
overlapping strided slice:
[[ 1. 2. 3.]
[ 3. 4. 5.]
[ 5. 6. 7.]]
This is the initial version I got working, which doesn't allow for variable block_size, but I think it's easier to see what's going on with the convolution filters - we take a vector of 3 values, every stride steps.
def overlap(tensor, stride=2):
    # Reshape the tensor to allow it to be passed in to conv2d.
    reshaped = tf.reshape(tensor, [1, 1, -1, 1])

    # Construct the block_size filters.
    filter_dim = [1, -1, 1, 1]
    x_filt = tf.reshape(tf.constant([1., 0., 0.]), filter_dim)
    y_filt = tf.reshape(tf.constant([0., 1., 0.]), filter_dim)
    z_filt = tf.reshape(tf.constant([0., 0., 1.]), filter_dim)

    # Stride along the tensor with the above filters.
    stride_window = [1, 1, stride, 1]
    x = tf.nn.conv2d(reshaped, x_filt, stride_window, padding='VALID')
    y = tf.nn.conv2d(reshaped, y_filt, stride_window, padding='VALID')
    z = tf.nn.conv2d(reshaped, z_filt, stride_window, padding='VALID')

    # Pack the three tensors along the 4th dimension.
    result = tf.stack([x, y, z], axis=4)
    # Squeeze to get rid of the extra dimensions.
    result = tf.squeeze(result)
    return result
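For completeness (my addition), a usage sketch of this fixed-size version; only the stride is configurable:
tensor = tf.constant([1., 2., 3., 4., 5., 6., 7.])
overlap_tensor = overlap(tensor, stride=2)

with tf.Session() as sess:
    print(sess.run(overlap_tensor))
# [[1. 2. 3.]
#  [3. 4. 5.]
#  [5. 6. 7.]]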
You can achieve the same thing using tf.extract_image_patches.
import numpy as np
import tensorflow as tf

tensor = tf.placeholder(tf.int32, [None])

def overlapping_blocker(tensor, block_size=3, stride=2):
    # Treat the 1D tensor as a [1, n, 1, 1] "image" and extract patches of height block_size.
    return tf.squeeze(tf.extract_image_patches(
        tensor[None, ..., None, None], ksizes=[1, block_size, 1, 1],
        strides=[1, stride, 1, 1], rates=[1, 1, 1, 1], padding='VALID'))

result = overlapping_blocker(tensor, block_size=3, stride=2)

sess = tf.InteractiveSession()
print(result.eval({tensor: np.array([1, 2, 3, 4, 5, 6, 7], np.int32)}))
#[[1 2 3]
# [3 4 5]
# [5 6 7]]
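Note (my addition): in TensorFlow 2.x this op is exposed as tf.image.extract_patches, with the ksizes argument renamed to sizes; a roughly equivalent sketch:
import tensorflow as tf

def overlapping_blocker_v2(tensor, block_size=3, stride=2):
    patches = tf.image.extract_patches(tensor[None, ..., None, None],
                                       sizes=[1, block_size, 1, 1],
                                       strides=[1, stride, 1, 1],
                                       rates=[1, 1, 1, 1],
                                       padding='VALID')
    return tf.squeeze(patches)

print(overlapping_blocker_v2(tf.constant([1, 2, 3, 4, 5, 6, 7])).numpy())
# [[1 2 3]
#  [3 4 5]
#  [5 6 7]]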