tensorflow: how to rotate an image for data augmentation?

In tensorflow, I would like to rotate an image by a random angle, for data augmentation. But I can't find this transformation in the tf.image module.

Update: see @astromme's answer below. TensorFlow now supports rotating images natively.

What you can do while there is no native method in tensorflow is something like this:

    import numpy as np
    import tensorflow as tf
    from PIL import Image

    sess = tf.InteractiveSession()

    # Pass image tensor object to a PIL image
    image = Image.fromarray(image.eval())

    # Use PIL or another library of the sort to rotate
    rotated = image.rotate(degrees)

    # Convert rotated image back to tensor
    rotated_tensor = tf.convert_to_tensor(np.array(rotated))
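If you'd rather keep the rotation inside the graph instead of materializing the image with .eval(), one option in TF 1.x is to wrap the PIL call with tf.py_func. A minimal sketch, assuming a uint8 HWC image; the placeholder shapes and the helper name _rotate_with_pil are illustrative, not from the original answer:

    import numpy as np
    import tensorflow as tf
    from PIL import Image

    def _rotate_with_pil(image_np, degrees):
        # tf.py_func hands the tensor in as a NumPy array, so plain PIL works here.
        return np.asarray(Image.fromarray(image_np).rotate(degrees))

    image = tf.placeholder(tf.uint8, shape=[None, None, 3])
    angle = tf.placeholder(tf.float32, shape=[])

    # The wrapped rotation can now sit inside a larger graph or input pipeline.
    rotated = tf.py_func(_rotate_with_pil, [image, angle], tf.uint8)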

This can now be done in tensorflow:

 tf.contrib.image.rotate(images, degrees * math.pi / 180, interpolation='BILINEAR') 
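For random-angle augmentation, the angle can itself be a random tensor; tf.contrib.image.rotate also accepts one angle per image when given a 4-D batch. A minimal sketch, with the batch shape and the ±15 degree range chosen purely for illustration:

    import math
    import tensorflow as tf

    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])

    # Draw one random angle per image in the batch, here in [-15, 15] degrees.
    angles = tf.random_uniform([tf.shape(images)[0]], minval=-15.0, maxval=15.0)
    rotated = tf.contrib.image.rotate(images, angles * math.pi / 180, interpolation='BILINEAR')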

Since I wanted to be able to rotate tensors, I came up with the following piece of code, which rotates a [height, width, depth] tensor by a given angle:

    def rotate_image_tensor(image, angle, mode='black'):
        """
        Rotates a 3D tensor (HWD), which represents an image by given radian angle.
        New image has the same size as the input image.

        mode controls what happens to border pixels.
        mode = 'black' results in black bars (value 0 in unknown areas)
        mode = 'white' results in value 255 in unknown areas
        mode = 'ones' results in value 1 in unknown areas
        mode = 'repeat' keeps repeating the closest pixel known
        """
        s = image.get_shape().as_list()
        assert len(s) == 3, "Input needs to be 3D."
        assert (mode == 'repeat') or (mode == 'black') or (mode == 'white') or (mode == 'ones'), "Unknown boundary mode."
        image_center = [np.floor(x/2) for x in s]

        # Coordinates of new image
        coord1 = tf.range(s[0])
        coord2 = tf.range(s[1])

        # Create vectors of those coordinates in order to vectorize the image
        coord1_vec = tf.tile(coord1, [s[1]])

        coord2_vec_unordered = tf.tile(coord2, [s[0]])
        coord2_vec_unordered = tf.reshape(coord2_vec_unordered, [s[0], s[1]])
        coord2_vec = tf.reshape(tf.transpose(coord2_vec_unordered, [1, 0]), [-1])

        # center coordinates since rotation center is supposed to be in the image center
        coord1_vec_centered = coord1_vec - image_center[0]
        coord2_vec_centered = coord2_vec - image_center[1]

        coord_new_centered = tf.cast(tf.pack([coord1_vec_centered, coord2_vec_centered]), tf.float32)

        # Perform backward transformation of the image coordinates
        rot_mat_inv = tf.dynamic_stitch([[0], [1], [2], [3]], [tf.cos(angle), tf.sin(angle), -tf.sin(angle), tf.cos(angle)])
        rot_mat_inv = tf.reshape(rot_mat_inv, shape=[2, 2])
        coord_old_centered = tf.matmul(rot_mat_inv, coord_new_centered)

        # Find nearest neighbor in old image
        coord1_old_nn = tf.cast(tf.round(coord_old_centered[0, :] + image_center[0]), tf.int32)
        coord2_old_nn = tf.cast(tf.round(coord_old_centered[1, :] + image_center[1]), tf.int32)

        # Clip values to stay inside image coordinates
        if mode == 'repeat':
            coord_old1_clipped = tf.minimum(tf.maximum(coord1_old_nn, 0), s[0]-1)
            coord_old2_clipped = tf.minimum(tf.maximum(coord2_old_nn, 0), s[1]-1)
        else:
            outside_ind1 = tf.logical_or(tf.greater(coord1_old_nn, s[0]-1), tf.less(coord1_old_nn, 0))
            outside_ind2 = tf.logical_or(tf.greater(coord2_old_nn, s[1]-1), tf.less(coord2_old_nn, 0))
            outside_ind = tf.logical_or(outside_ind1, outside_ind2)

            coord_old1_clipped = tf.boolean_mask(coord1_old_nn, tf.logical_not(outside_ind))
            coord_old2_clipped = tf.boolean_mask(coord2_old_nn, tf.logical_not(outside_ind))

            coord1_vec = tf.boolean_mask(coord1_vec, tf.logical_not(outside_ind))
            coord2_vec = tf.boolean_mask(coord2_vec, tf.logical_not(outside_ind))

        coord_old_clipped = tf.cast(tf.transpose(tf.pack([coord_old1_clipped, coord_old2_clipped]), [1, 0]), tf.int32)

        # Coordinates of the new image
        coord_new = tf.transpose(tf.cast(tf.pack([coord1_vec, coord2_vec]), tf.int32), [1, 0])

        image_channel_list = tf.split(2, s[2], image)

        image_rotated_channel_list = list()
        for image_channel in image_channel_list:
            image_chan_new_values = tf.gather_nd(tf.squeeze(image_channel), coord_old_clipped)

            if (mode == 'black') or (mode == 'repeat'):
                background_color = 0
            elif mode == 'ones':
                background_color = 1
            elif mode == 'white':
                background_color = 255

            image_rotated_channel_list.append(tf.sparse_to_dense(coord_new, [s[0], s[1]], image_chan_new_values,
                                                                 background_color, validate_indices=False))

        image_rotated = tf.transpose(tf.pack(image_rotated_channel_list), [1, 2, 0])

        return image_rotated
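A quick usage sketch for the function above (the 128x128x3 shape is illustrative; the image shape must be fully defined, since the function reads it via get_shape().as_list(), the angle is in radians, and the TF version must still provide tf.pack and the old tf.split argument order):

    import numpy as np
    import tensorflow as tf

    image_ph = tf.placeholder(tf.float32, shape=[128, 128, 3])
    angle_ph = tf.placeholder(tf.float32, shape=[])

    rotated = rotate_image_tensor(image_ph, angle_ph, mode='black')

    with tf.Session() as sess:
        out = sess.run(rotated, feed_dict={image_ph: np.zeros([128, 128, 3], np.float32),
                                           angle_ph: np.pi / 6})  # 30 degrees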

Rotation and cropping in TensorFlow

I personally needed image rotation and cropping of the black edges in TensorFlow, as in the example below, and I was able to implement it with the following functions.

    def _rotate_and_crop(image, output_height, output_width, rotation_degree, do_crop):
        """Rotate the given image with the given rotation degree and crop for the black edges if necessary

        Args:
            image: A `Tensor` representing an image of arbitrary size.
            output_height: The height of the image after preprocessing.
            output_width: The width of the image after preprocessing.
            rotation_degree: The degree of rotation on the image.
            do_crop: Do cropping if it is True.

        Returns:
            A rotated image.
        """
        # Rotate the given image with the given rotation degree
        if rotation_degree != 0:
            image = tf.contrib.image.rotate(image, math.radians(rotation_degree), interpolation='BILINEAR')

            # Center crop to omit the black noise on the edges
            if do_crop:
                lrr_width, lrr_height = _largest_rotated_rect(output_height, output_width, math.radians(rotation_degree))
                resized_image = tf.image.central_crop(image, float(lrr_height)/output_height)
                image = tf.image.resize_images(resized_image, [output_height, output_width],
                                               method=tf.image.ResizeMethod.BILINEAR, align_corners=False)

        return image

    def _largest_rotated_rect(w, h, angle):
        """
        Given a rectangle of size wxh that has been rotated by 'angle' (in
        radians), computes the width and height of the largest possible
        axis-aligned rectangle within the rotated rectangle.

        Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
        Converted to Python by Aaron Snoswell

        Source: http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
        """
        quadrant = int(math.floor(angle / (math.pi / 2))) & 3
        sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
        alpha = (sign_alpha % math.pi + math.pi) % math.pi

        bb_w = w * math.cos(alpha) + h * math.sin(alpha)
        bb_h = w * math.sin(alpha) + h * math.cos(alpha)

        gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)

        delta = math.pi - alpha - gamma

        length = h if (w < h) else w

        d = length * math.cos(alpha)
        a = d * math.sin(alpha) / math.sin(delta)

        y = a * math.cos(gamma)
        x = y * math.tan(gamma)

        return (
            bb_w - 2 * x,
            bb_h - 2 * y
        )
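A usage sketch for these helpers (the 224x224 output size and the 15-degree angle are arbitrary illustrations):

    import tensorflow as tf

    image_ph = tf.placeholder(tf.float32, shape=[224, 224, 3])

    # Rotate by 15 degrees, then crop away the black corners the rotation introduces.
    augmented = _rotate_and_crop(image_ph, 224, 224, rotation_degree=15.0, do_crop=True)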

If you need a further example implementation and visualization in TensorFlow, you can use this repository. I hope this is helpful to other people.

Here is @zimmermc's answer, updated to Tensorflow v0.12.

Changes:

  • pack() is now stack()
  • the order of split's parameters is reversed (see the short sketch after the code below)

    def rotate_image_tensor(image, angle, mode='white'):
        """
        Rotates a 3D tensor (HWD), which represents an image by given radian angle.
        New image has the same size as the input image.

        mode controls what happens to border pixels.
        mode = 'black' results in black bars (value 0 in unknown areas)
        mode = 'white' results in value 255 in unknown areas
        mode = 'ones' results in value 1 in unknown areas
        mode = 'repeat' keeps repeating the closest pixel known
        """
        s = image.get_shape().as_list()
        assert len(s) == 3, "Input needs to be 3D."
        assert (mode == 'repeat') or (mode == 'black') or (mode == 'white') or (mode == 'ones'), "Unknown boundary mode."
        image_center = [np.floor(x/2) for x in s]

        # Coordinates of new image
        coord1 = tf.range(s[0])
        coord2 = tf.range(s[1])

        # Create vectors of those coordinates in order to vectorize the image
        coord1_vec = tf.tile(coord1, [s[1]])

        coord2_vec_unordered = tf.tile(coord2, [s[0]])
        coord2_vec_unordered = tf.reshape(coord2_vec_unordered, [s[0], s[1]])
        coord2_vec = tf.reshape(tf.transpose(coord2_vec_unordered, [1, 0]), [-1])

        # center coordinates since rotation center is supposed to be in the image center
        coord1_vec_centered = coord1_vec - image_center[0]
        coord2_vec_centered = coord2_vec - image_center[1]

        coord_new_centered = tf.cast(tf.stack([coord1_vec_centered, coord2_vec_centered]), tf.float32)

        # Perform backward transformation of the image coordinates
        rot_mat_inv = tf.dynamic_stitch([[0], [1], [2], [3]], [tf.cos(angle), tf.sin(angle), -tf.sin(angle), tf.cos(angle)])
        rot_mat_inv = tf.reshape(rot_mat_inv, shape=[2, 2])
        coord_old_centered = tf.matmul(rot_mat_inv, coord_new_centered)

        # Find nearest neighbor in old image
        coord1_old_nn = tf.cast(tf.round(coord_old_centered[0, :] + image_center[0]), tf.int32)
        coord2_old_nn = tf.cast(tf.round(coord_old_centered[1, :] + image_center[1]), tf.int32)

        # Clip values to stay inside image coordinates
        if mode == 'repeat':
            coord_old1_clipped = tf.minimum(tf.maximum(coord1_old_nn, 0), s[0]-1)
            coord_old2_clipped = tf.minimum(tf.maximum(coord2_old_nn, 0), s[1]-1)
        else:
            outside_ind1 = tf.logical_or(tf.greater(coord1_old_nn, s[0]-1), tf.less(coord1_old_nn, 0))
            outside_ind2 = tf.logical_or(tf.greater(coord2_old_nn, s[1]-1), tf.less(coord2_old_nn, 0))
            outside_ind = tf.logical_or(outside_ind1, outside_ind2)

            coord_old1_clipped = tf.boolean_mask(coord1_old_nn, tf.logical_not(outside_ind))
            coord_old2_clipped = tf.boolean_mask(coord2_old_nn, tf.logical_not(outside_ind))

            coord1_vec = tf.boolean_mask(coord1_vec, tf.logical_not(outside_ind))
            coord2_vec = tf.boolean_mask(coord2_vec, tf.logical_not(outside_ind))

        coord_old_clipped = tf.cast(tf.transpose(tf.stack([coord_old1_clipped, coord_old2_clipped]), [1, 0]), tf.int32)

        # Coordinates of the new image
        coord_new = tf.transpose(tf.cast(tf.stack([coord1_vec, coord2_vec]), tf.int32), [1, 0])

        image_channel_list = tf.split(image, s[2], 2)

        image_rotated_channel_list = list()
        for image_channel in image_channel_list:
            image_chan_new_values = tf.gather_nd(tf.squeeze(image_channel), coord_old_clipped)

            if (mode == 'black') or (mode == 'repeat'):
                background_color = 0
            elif mode == 'ones':
                background_color = 1
            elif mode == 'white':
                background_color = 255

            image_rotated_channel_list.append(tf.sparse_to_dense(coord_new, [s[0], s[1]], image_chan_new_values,
                                                                 background_color, validate_indices=False))

        image_rotated = tf.transpose(tf.stack(image_rotated_channel_list), [1, 2, 0])

        return image_rotated
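For the second change, the tf.split signature went from tf.split(split_dim, num_split, value) to tf.split(value, num_or_size_splits, axis). A minimal sketch of the two calls:

    import tensorflow as tf

    image = tf.zeros([128, 128, 3])

    # Pre-v0.12 signature: tf.split(split_dim, num_split, value)
    # channels = tf.split(2, 3, image)

    # v0.12+ signature: tf.split(value, num_or_size_splits, axis)
    channels = tf.split(image, 3, axis=2)  # three [128, 128, 1] tensors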

To rotate an image or a batch of images counter-clockwise by multiples of 90 degrees, you can use tf.image.rot90(image, k=1, name=None).

k denotes the number of 90-degree rotations you want to perform.

In the case of a single image, image is a 3-D Tensor of shape [height, width, channels], and in the case of a batch of images, image is a 4-D Tensor of shape [batch, height, width, channels].
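A minimal example (the shape is illustrative). In TF 1.x, k can also be a scalar tensor, which enables a random multiple-of-90-degrees augmentation; treat that variant as an assumption to verify against your version:

    import tensorflow as tf

    image = tf.placeholder(tf.float32, shape=[224, 224, 3])

    # Two 90-degree counter-clockwise turns, i.e. 180 degrees.
    rotated = tf.image.rot90(image, k=2)

    # For augmentation: draw k in {0, 1, 2, 3} at random per example.
    k = tf.random_uniform([], minval=0, maxval=4, dtype=tf.int32)
    randomly_rotated = tf.image.rot90(image, k=k)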