# DeepTIAS / Features / DeepLearning / Reference / Tang's / image_enhancement.py
# Data-augmentation script for paired sample/label skin images.
# (The "Newer"/"Older" navigation text and commit line that preceded this file
# were web-scrape artifacts, not source code.)
import tensorflow as tf
import os
import random
import numpy as np

source_file = "D:/test13/SampleSkin1/"  # source directory: sample images
source_file2 = "D:/test13/LabelSkin1/"  # source directory: label images
target_file = "D:/test13/SampleSkin2/"  # output directory: augmented samples
target_file2 = "D:/test13/LabelSkin2/"  # output directory: augmented labels
num = 2000  # number of images to process (iterations of the main loop)
flip = False  # NOTE(review): never read in this file — confirm before removing
flip2 = False  # horizontal-flip switch read by transform2()
seed = None  # RNG seed for the random crop offset in transform()
scale_size = 256  # scale images to this size before cropping to CROP_SIZE
brightness = False  # color-jitter switch read by transform3()
contrast = False  # color-jitter switch read by transform3()
hue = False  # hue jitter is commented out inside transform3()
saturation = False  # color-jitter switch read by transform3()
gamma = False  # gamma-adjust switch read by transform3()
aspect_ratio = 1.0  # aspect ratio of output images (width/height); unused here
CROP_SIZE = 256  # final crop size used by transform()

# Create the output directory for augmented samples if it does not exist.
if not os.path.exists(target_file):
    os.makedirs(target_file)

file_list = os.listdir(source_file)  # file names of the source sample images

# Create the output directory for augmented labels if it does not exist.
if not os.path.exists(target_file2):
    os.makedirs(target_file2)

file_list2 = os.listdir(source_file2)  # file names of the source label images


# 图像预处理,翻转、改变形状
def transform(image):
    r = image

    # area produces a nice downscaling, but does nearest neighbor for upscaling
    # assume we're going to be doing downscaling here
    r = tf.image.resize_images(r, [scale_size, scale_size], method=tf.image.ResizeMethod.AREA)

    offset = tf.cast(tf.floor(tf.random_uniform([2], 0, scale_size - CROP_SIZE + 1, seed=seed)), dtype=tf.int32)
    if scale_size > CROP_SIZE:
        r = tf.image.crop_to_bounding_box(r, offset[0], offset[1], CROP_SIZE, CROP_SIZE)
    elif scale_size < CROP_SIZE:
        raise Exception("scale size cannot be less than crop size")
    return r


def transform2(image):
    """Optionally mirror `image` horizontally.

    Controlled by the module-level `flip2` flag; returns the image
    unchanged when flipping is disabled.
    """
    if not flip2:
        return image
    # Alternatives kept from the original experiments:
    # tf.image.flip_up_down(image)
    # tf.image.random_flip_left_right(image, seed=seed)
    # tf.image.random_flip_up_down(image, seed=seed)
    return tf.image.flip_left_right(image)


def transform3(image, color_ordering=0):
    """Apply the color-jitter ops enabled by the module-level flags.

    Only color_ordering == 0 is implemented; any other value returns the
    image unchanged. The random gain is drawn on every call (even when the
    gamma flag is off) to keep the Python RNG sequence identical.
    """
    gain = random.uniform(1, 2)  # gain for adjust_gamma
    # gamma exponent fixed at 1; random.uniform(1, 5) was tried and disabled
    gamma_exp = 1

    out = image
    if color_ordering != 0:
        return out

    if brightness:
        out = tf.image.random_brightness(out, max_delta=0.2)
    if contrast:
        out = tf.image.random_contrast(out, lower=0.5, upper=1.5)
    if saturation:
        out = tf.image.random_saturation(out, lower=1, upper=2)
    if gamma:
        out = tf.image.adjust_gamma(out, gain=gain, gamma=gamma_exp)
    # Hue jitter was disabled in the original:
    # if hue:
    #     out = tf.image.random_hue(out, max_delta=0.5)

    return out


with tf.Session() as sess:
    # Bound the loop by the shorter of the two listings: the original ran
    # range(num) unconditionally and raised IndexError once i passed the
    # number of files actually present.
    n_images = min(num, len(file_list), len(file_list2))
    # NOTE(review): new graph ops are created on every iteration, so the TF1
    # graph (and memory use) grows with n_images; building the graph once
    # with a filename placeholder would avoid this.
    for i in range(n_images):
        # Read the paired sample and label images.
        image_raw_data = tf.gfile.FastGFile(source_file + file_list[i], "rb").read()
        image_raw_data2 = tf.gfile.FastGFile(source_file2 + file_list2[i], "rb").read()
        print("Processing: ", str(i))
        image_data = tf.image.decode_jpeg(image_raw_data)
        # image_data = tf.image.convert_image_dtype(image_data, dtype=tf.float32)
        image_data2 = tf.image.decode_jpeg(image_raw_data2)
        # image_data2 = tf.image.convert_image_dtype(image_data2, dtype=tf.float32)

        # Horizontal flip is applied unconditionally; sample and label are
        # flipped together so they stay spatially aligned.
        adjust = tf.image.flip_left_right(image_data)
        adjust2 = tf.image.flip_left_right(image_data2)

        # decode_jpeg already yields uint8, so this conversion is a no-op
        # kept from the original for safety.
        image_data = tf.image.convert_image_dtype(adjust, dtype=tf.uint8)
        image_data2 = tf.image.convert_image_dtype(adjust2, dtype=tf.uint8)

        encode_data = tf.image.encode_jpeg(image_data)
        encode_data2 = tf.image.encode_jpeg(image_data2)

        # .eval() runs the per-image graph inside the ambient session.
        with tf.gfile.GFile(target_file + file_list[i] + ".jpeg", "wb") as f1:
            f1.write(encode_data.eval())

        with tf.gfile.GFile(target_file2 + file_list2[i] + ".jpeg", "wb") as f2:
            f2.write(encode_data2.eval())

print("Finished!!!")