using System;
using System.Windows.Forms;
using System.IO;
using System.Drawing;
using System.Drawing.Imaging;
using System.ComponentModel;
using System.Runtime.InteropServices;
using System.Threading;
using System.Diagnostics;
using System.Linq;
using System.Collections.Generic;
using TensorFlow;
using OpenCvSharp;
namespace Tongue_extraction
{
public partial class Form1 : Form
{
// P/Invoke into the native RemoveSmallRegionDLL.dll: reads the image file `name`,
// removes small connected regions (threshold AreaLimit), writes the result to `name2`.
// NOTE(review): the exact semantics of CheckMode / NeihborMode are defined inside the
// native DLL and are not visible from this file — confirm against the DLL source.
[DllImport(@"RemoveSmallRegionDLL.dll", EntryPoint = "RemoveSmallRegion", SetLastError = true, CharSet = CharSet.Ansi, ExactSpelling = false, CallingConvention = CallingConvention.StdCall)]
extern static void RemoveSmallRegion(string name, string name2, int AreaLimit, int CheckMode, int NeihborMode);
// Working Mats, preallocated at 1280x1024 (apparently the camera resolution — TODO confirm).
Mat mat_drawBox = new Mat(1024, 1280, MatType.CV_8UC3, 1); // input image with detection box drawn on it
Mat mat_roi = new Mat(1024, 1280, MatType.CV_8UC3, 1); // cropped region resized to 256x256 for segmentation
Mat mat_input = new Mat(1024, 1280, MatType.CV_8UC3, 1); // original input image
Mat mat_roi256 = new Mat(256, 256, MatType.CV_8UC3, 1); // only used as a 256x256 size template for Cv2.Resize
Mat mat_roisize = new Mat(1024, 1280, MatType.CV_8UC3, 1); // crop of the input at the bounding-box size
Mat mat_output = new Mat(1024, 1280, MatType.CV_8UC1, 1); // raw 256x256 segmentation output (detection path)
Mat mat_outputNoBox = new Mat(1024, 1280, MatType.CV_8UC1, 1); // raw segmentation output (no-detection fallback path)
Mat mat_outputChanged = new Mat(1024, 1280, MatType.CV_8UC1, 1); // segmentation output resized back to box size
Mat mat_mask = new Mat(1024, 1280, MatType.CV_8UC1, 1); // full-size mask assembled from the box-sized result
Mat mat_extraction = new Mat(1024, 1280, MatType.CV_8UC3, 1); // input with non-tongue pixels whited out
Mat mat_cropped; // input with only the bounding-box region copied in (rest stays at init value)
Mat mat_outputSRGNoBox = new Mat(1024, 1280, MatType.CV_8UC1, 1); // small-region-removed output (fallback path)
Mat mat_outputSRG = new Mat(1024, 1280, MatType.CV_8UC1, 1); // small-region-removed output (detection path)
Mat mat_maskSRG = new Mat(1024, 1280, MatType.CV_8UC1, 1); // final binary mask after noise removal + threshold
// Shared scratch bitmap used for every Mat<->Bitmap conversion and UI display (legacy name).
public static Bitmap bitmap_bitch;
string[] path; // files found under .\data for batch processing
OpenCvSharp.Rect rectangle; // detected tongue bounding box in input-image pixels
byte[] byte_inputDetection; // JPEG bytes fed to the detection model
byte[] byte_inputSegmentation; // JPEG bytes fed to the segmentation model
OpenCvSharp.Point P1 = new OpenCvSharp.Point(); // top-left corner of the detected box
OpenCvSharp.Point P2 = new OpenCvSharp.Point(); // bottom-right corner of the detected box
// Flat 256*256 binary mask buffer written by the segmentation loops (65536 bytes used;
// buffer is oversized at 200000).
byte[] mask = new byte[200000];
string basepath; // current working directory, cached per image
string imageFile; // image file name relative to the data folder (keeps its leading separator)
string time; // timestamp string for log lines
string modelFile; // path of the segmentation model .pb currently in use
int ii = 0; // write index into `mask`; reset per image
int count; // 1-based progress counter shown in the UI
int areaCount; // number of tongue pixels in the final mask (area in pixels)
Rect roi = new Rect(); // ROI rect built from P1 and the detected box size
int mmp = 0; // row cursor used when copying the resized mask back into place
int pmm = 0; // column cursor for the same copy loop
int check_detection = 0; // 1 if a tongue box with score > 0.5 was found, else 0
float max_score = 0; // best detection score seen for the current image
private ManualResetEvent manualReset = new ManualResetEvent(true); // signaled = running; reset to pause the worker
// CSV output path for per-image bounding-box info.
// NOTE(review): "hh" is the 12-hour format specifier — "HH" (24-hour) may have been intended; confirm.
string fileName_info = Directory.GetCurrentDirectory() + "//info//" + DateTime.Now.ToLocalTime().ToString("yyyyMMddhhmmss") + ".csv";
StreamWriter sw; // writer for fileName_info, opened by the worker methods
/// <summary>Creates the form and initializes the designer-generated components.</summary>
public Form1()
{
InitializeComponent();
}
// On startup, default the processing mode to index 1 ("image and calibration").
private void Form1_Load(object sender, EventArgs e)
{
comboBox_mode.SelectedIndex = 1;
}
/// <summary>
/// Starts batch processing of every file found under .\data: resets the result
/// picture boxes, locks the mode selector, and launches the BackgroundWorker
/// matching the selected mode.
/// </summary>
/// <param name="sender">Event source (the start button).</param>
/// <param name="e">Unused event data.</param>
private void Button_start_Click(object sender, EventArgs e)
{
    path = Directory.GetFiles("data");
    button_start.Enabled = false;
    button_pause.Enabled = true;
    ClearResultPictureBoxes();
    comboBox_mode.Enabled = false;
    // BUGFIX: the original subscribed a new DoWork handler on every click, so a
    // second run fired the worker callback multiple times per job. Unsubscribe
    // both possible handlers first (removing a handler that is not attached is a no-op).
    this.backgroundWorker1.DoWork -= this.BackgroundWorker1_DoWork_Image;
    this.backgroundWorker1.DoWork -= this.BackgroundWorker1_DoWork_ImageandCalib;
    if (comboBox_mode.SelectedIndex == 0)
    {
        this.backgroundWorker1.DoWork += new System.ComponentModel.DoWorkEventHandler(this.BackgroundWorker1_DoWork_Image);
        backgroundWorker1.RunWorkerAsync();
    }
    else if (comboBox_mode.SelectedIndex == 1)
    {
        this.backgroundWorker1.DoWork += new System.ComponentModel.DoWorkEventHandler(this.BackgroundWorker1_DoWork_ImageandCalib);
        backgroundWorker1.RunWorkerAsync();
    }
    else if (comboBox_mode.SelectedIndex == 2)
    {
        MessageBox.Show("This mode is not implemented.");
        // BUGFIX: no worker was started, so restore the controls; the original
        // left the start button and the mode selector disabled forever here.
        RestoreIdleUi();
    }
    else
    {
        MessageBox.Show("Please select the mode on the combobox");
        RestoreIdleUi(); // BUGFIX: same UI-lockout fix as the mode-2 branch
    }
}
// Clears every result picture box so stale output from a previous run is not shown.
private void ClearResultPictureBoxes()
{
    foreach (var box in new[] { pictureBox_input, pictureBox_detection, pictureBox_cropResized,
                                pictureBox_output, pictureBox_outputSRG, pictureBox_maskSRG,
                                pictureBox_extraction, pictureBox_last })
    {
        box.Image = null;
        box.Refresh();
    }
}
// Returns the controls to the idle state when no background worker was started.
private void RestoreIdleUi()
{
    button_start.Enabled = true;
    button_pause.Enabled = false;
    comboBox_mode.Enabled = true;
}
public static class ImageUtil
{
    /// <summary>
    /// Decodes a JPEG byte array and converts it into a 1x256x256x3 image tensor
    /// for the detection model (UInt8 by default; Mean=0 / Scale=1, i.e. pixel
    /// values are passed through unchanged).
    /// </summary>
    /// <param name="contents">JPEG-encoded image bytes.</param>
    /// <param name="destinationDataType">Element type of the returned tensor.</param>
    /// <returns>The normalized image tensor (first output of the conversion graph).</returns>
    public static TFTensor CreateTensorFromImageFile(byte[] contents, TFDataType destinationDataType = TFDataType.UInt8)
    {
        // DecodeJpeg uses a scalar String-valued tensor as input.
        var tensor = TFTensor.CreateString(contents);
        TFGraph graph;
        TFOutput input, output;
        // Construct a graph to normalize the image.
        ConstructGraphToNormalizeImage(out graph, out input, out output, destinationDataType);
        // Execute that graph to normalize this one image.
        using (var session = new TFSession(graph))
        {
            var normalized = session.Run(
                inputs: new[] { input },
                inputValues: new[] { tensor },
                outputs: new[] { output });
            // BUGFIX: the original also called session.Dispose() explicitly inside
            // this using block, disposing the session twice; the using statement
            // alone is sufficient. The forced GC.Collect()/WaitForPendingFinalizers()
            // calls were removed as well — forcing a full GC per image is an
            // anti-pattern; deterministic Dispose() below releases the native memory.
            tensor.Dispose();
            graph.Dispose(); // the result tensor does not need the conversion graph
            return normalized[0];
        }
    }
    // The inception model takes as input the image described by a Tensor in a very
    // specific normalized format (a particular image size, shape of the input tensor,
    // normalized pixel values etc.).
    //
    // This function constructs a graph of TensorFlow operations which takes as
    // input a JPEG-encoded string and returns a tensor suitable as input to the
    // inception model: decode -> cast -> add batch dim -> resize to 256x256 ->
    // subtract Mean -> divide by Scale -> cast to the destination type.
    public static void ConstructGraphToNormalizeImage(out TFGraph graph, out TFOutput input, out TFOutput output, TFDataType destinationDataType = TFDataType.UInt8)
    {
        const int W = 256;
        const int H = 256;
        const float Mean = 0;  // no mean shift for the detection model
        const float Scale = 1; // no scaling for the detection model
        graph = new TFGraph();
        input = graph.Placeholder(TFDataType.String);
        output = graph.Cast(graph.Div(
            x: graph.Sub(
                x: graph.ResizeBilinear(
                    images: graph.ExpandDims(
                        input: graph.Cast(
                            graph.DecodeJpeg(contents: input, channels: 3), DstT: destinationDataType),
                        dim: graph.Const(0, "make_batch")),
                    size: graph.Const(new int[] { W, H }, "size")),
                y: graph.Const(Mean, "mean")),
            y: graph.Const(Scale, "scale")), destinationDataType);
    }
}
public static class ImageUtil2
{
    /// <summary>
    /// Decodes a JPEG byte array and converts it into a 1x256x256x3 Float tensor
    /// for the segmentation (pix2pix) model, normalized to roughly [-1, 1]
    /// (Mean=128 / Scale=128).
    /// </summary>
    /// <param name="contents">JPEG-encoded image bytes.</param>
    /// <param name="destinationDataType">Element type of the returned tensor.</param>
    /// <returns>The normalized image tensor (first output of the conversion graph).</returns>
    public static TFTensor CreateTensorFromImageFile(byte[] contents, TFDataType destinationDataType = TFDataType.Float)
    {
        // DecodeJpeg uses a scalar String-valued tensor as input.
        var tensor = TFTensor.CreateString(contents);
        TFGraph graph;
        TFOutput input, output;
        // Construct a graph to normalize the image.
        ConstructGraphToNormalizeImage(out graph, out input, out output, destinationDataType);
        // Execute that graph to normalize this one image.
        using (var session = new TFSession(graph))
        {
            var normalized = session.Run(
                inputs: new[] { input },
                inputValues: new[] { tensor },
                outputs: new[] { output });
            // BUGFIX: the original explicitly disposed the session inside its own
            // using block (double dispose) and forced three GC collections per
            // image; both removed. Deterministic Dispose() below frees the
            // native tensor/graph memory.
            tensor.Dispose();
            graph.Dispose(); // the result tensor does not need the conversion graph
            return normalized[0];
        }
    }
    // The inception model takes as input the image described by a Tensor in a very
    // specific normalized format (a particular image size, shape of the input tensor,
    // normalized pixel values etc.).
    //
    // This function constructs a graph of TensorFlow operations which takes as
    // input a JPEG-encoded string and returns a tensor suitable as input to the
    // segmentation model: decode -> cast to Float -> add batch dim -> resize to
    // 256x256 -> subtract Mean -> divide by Scale -> cast to the destination type.
    public static void ConstructGraphToNormalizeImage(out TFGraph graph, out TFOutput input, out TFOutput output, TFDataType destinationDataType = TFDataType.Float)
    {
        const int W = 256;
        const int H = 256;
        const float Mean = 128;  // center pixel values around 0
        const float Scale = 128; // scale to roughly [-1, 1]
        graph = new TFGraph();
        input = graph.Placeholder(TFDataType.String);
        output = graph.Cast(graph.Div(
            x: graph.Sub(
                x: graph.ResizeBilinear(
                    images: graph.ExpandDims(
                        // Cast to Float here regardless of destinationDataType: the
                        // Sub/Div arithmetic needs floating point before the final cast.
                        input: graph.Cast(
                            graph.DecodeJpeg(contents: input, channels: 3), DstT: TFDataType.Float),
                        dim: graph.Const(0, "make_batch")),
                    size: graph.Const(new int[] { W, H }, "size")),
                y: graph.Const(Mean, "mean")),
            y: graph.Const(Scale, "scale")), destinationDataType);
    }
}
/// <summary>
/// Encodes a bitmap as JPEG and returns the encoded bytes.
/// </summary>
/// <param name="bitmap">The image to encode.</param>
/// <returns>The JPEG-encoded contents of <paramref name="bitmap"/>.</returns>
public static byte[] Bitmap2Byte(Bitmap bitmap)
{
    using (MemoryStream stream = new MemoryStream())
    {
        bitmap.Save(stream, ImageFormat.Jpeg);
        // BUGFIX: the original seeked to 0 and issued a single Stream.Read, but
        // Read is not guaranteed to fill the buffer in one call. ToArray()
        // reliably returns every byte written to the MemoryStream.
        return stream.ToArray();
    }
}
/// <summary>
/// Returns the path of the segmentation model used on the detection path.
/// Despite the name, nothing is downloaded — the .pb file must already exist
/// under <paramref name="dir"/>.
/// </summary>
/// <param name="dir">Directory containing the model file.</param>
/// <returns>Full path to "pruning101_step11999.pb" inside <paramref name="dir"/>.</returns>
private static string DownloadDefaultModel(string dir)
{
    return Path.Combine(dir, "pruning101_step11999.pb");
}
/// <summary>
/// Returns the path of the fallback pix2pix segmentation model (trained without
/// detection), used when no tongue bounding box was found. Despite the name,
/// nothing is downloaded — the .pb file must already exist under <paramref name="dir"/>.
/// </summary>
/// <param name="dir">Directory containing the model file.</param>
/// <returns>Full path to the fallback model inside <paramref name="dir"/>.</returns>
private static string DownloadDefaultModel_noBoxPix2Pix(string dir)
{
    return Path.Combine(dir, "424_256_64_5999_scale300_enhancment_L1loss0.02001.pb");
}
/// <summary>
/// Builds an 8-bpp indexed grayscale bitmap from a flat array of pixel values
/// laid out row by row (width * height bytes, no padding).
/// </summary>
/// <param name="rawValues">Source gray levels, one byte per pixel, row-major.</param>
/// <param name="width">Image width in pixels.</param>
/// <param name="height">Image height in pixels.</param>
/// <returns>A Format8bppIndexed bitmap with a linear grayscale palette.</returns>
public static Bitmap ToGrayBitmap(byte[] rawValues, int width, int height)
{
    // Create the target bitmap and lock its pixel buffer for writing.
    Bitmap bmp = new Bitmap(width, height, PixelFormat.Format8bppIndexed);
    BitmapData bmpData = bmp.LockBits(new Rectangle(0, 0, width, height),
        ImageLockMode.WriteOnly, PixelFormat.Format8bppIndexed);
    // The bitmap stores each row padded out to `stride` bytes; re-lay the tightly
    // packed source rows into a stride-aligned buffer (padding bytes stay zero).
    int stride = bmpData.Stride;
    byte[] padded = new byte[stride * height];
    for (int row = 0; row < height; row++)
    {
        Buffer.BlockCopy(rawValues, row * width, padded, row * stride, width);
    }
    // Copy the whole buffer into the locked bitmap memory in one call, then unlock.
    Marshal.Copy(padded, 0, bmpData.Scan0, padded.Length);
    bmp.UnlockBits(bmpData);
    // Replace the default indexed palette with a linear grayscale ramp so the
    // index values display as gray levels instead of pseudo-color.
    ColorPalette palette;
    using (Bitmap throwaway = new Bitmap(1, 1, PixelFormat.Format8bppIndexed))
    {
        palette = throwaway.Palette;
    }
    for (int level = 0; level < 256; level++)
    {
        palette.Entries[level] = Color.FromArgb(level, level, level);
    }
    bmp.Palette = palette;
    return bmp;
}
// Worker for mode 0 ("image only"): for every file under .\data, (1) run the
// tongue-detection model, (2) crop/resize and run the segmentation model,
// (3) remove small regions with the native DLL, (4) rebuild a full-size binary
// mask, (5) extract the tongue region, saving every intermediate image and a
// per-image CSV row. If no tongue is detected, a fallback segmentation model
// (trained without detection) is run on the whole image instead.
// NOTE(review): `sw` is opened here but never closed in this handler, so the
// CSV may only be flushed when the writer is finalized — verify.
private void BackgroundWorker1_DoWork_Image(object sender, DoWorkEventArgs e)
{
// Writer for the bounding-box info CSV (Shift-JIS encoding).
sw = new StreamWriter(fileName_info, false, System.Text.Encoding.GetEncoding("shift_jis"));
// CSV header row (9 columns).
// NOTE(review): the rows written below emit fewer/other columns than this
// header ("Gloss Count" is never populated) — confirm intended layout.
sw.WriteLine(
"image" + "," +
"top left X" + "," + "top left Y" + "," + "bottom right X" + "," + "bottom right Y" + "," +
"Width" + "," + "Height" + "," + "Area" + "," + "Gloss Count");
using (MemoryStream ms = new MemoryStream())
{
for (int a = 0; a < path.Length; a++)
{
// Blocks here while the user has paused the run (ManualResetEvent).
manualReset.WaitOne();
// Reset the write index into the shared 256*256 `mask` buffer.
ii = 0;
basepath = Directory.GetCurrentDirectory();
// Strip the "data" folder prefix; imageFile keeps its leading separator.
imageFile = System.Text.RegularExpressions.Regex.Replace(path[a], "data", "");
// Update progress labels on the UI thread.
Invoke((MethodInvoker)delegate
{
label_processingFileName.Text = "Processing File: " + imageFile;
count = a + 1;
label_totalProgress.Text = "Total Progress: " + count + "/" + path.Length;
});
// Load and display the input image.
mat_input = Cv2.ImRead(basepath + "\\data" + imageFile, ImreadModes.Color);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_input);
Invoke((MethodInvoker)delegate
{
pictureBox_input.Image = bitmap_bitch;
pictureBox_input.Refresh();
});
// Stage labels: red marks the stage currently running.
label8.BackColor = Color.White;
label1.BackColor = Color.Red;
mat_drawBox = mat_input.Clone();
mat_cropped = new Mat(mat_input.Size(), MatType.CV_8UC3, 1);
byte_inputDetection = Bitmap2Byte(bitmap_bitch);
// --- Detection: run the object-detection model to find the tongue box. ---
using (var graph = new TFGraph())
{
var model = File.ReadAllBytes(Directory.GetCurrentDirectory() + "/Detection_Normal.pb");
graph.Import(model, "");
using (var session = new TFSession(graph))
{
var tensor = ImageUtil.CreateTensorFromImageFile(byte_inputDetection, TFDataType.UInt8);
var runner = session.GetRunner();
runner
.AddInput(graph["image_tensor"][0], tensor)
.Fetch("detection_boxes", "detection_scores", "detection_classes", "num_detections");
var output = runner.Run();
var boxes = (float[,,])output[0].GetValue();
var scores = (float[,])output[1].GetValue();
var classes = (float[,])output[2].GetValue();
var detections = (float[])output[3].GetValue();
check_detection = 0;
max_score = 0;
// Keep the highest-scoring detection above the 0.5 threshold.
for (int i = 0; i < scores.Length; i++)
{
if ((scores[0, i] > 0.5) && (scores[0, i] > max_score))
{
max_score = scores[0, i];
// Boxes come back normalized as [ymin, xmin, ymax, xmax]; scale to pixels.
float y_min = boxes[0, i, 0] * (float)bitmap_bitch.Height;
float x_min = boxes[0, i, 1] * (float)bitmap_bitch.Width;
float y_max = boxes[0, i, 2] * (float)bitmap_bitch.Height;
float x_max = boxes[0, i, 3] * (float)bitmap_bitch.Width;
P1.X = (int)x_min;
P1.Y = (int)y_min;
P2.X = (int)x_max;
P2.Y = (int)y_max;
// Draw the winning box in green on the display copy.
Cv2.Rectangle(mat_drawBox, P1, P2, new Scalar(0, 255, 0), 5);
rectangle.X = (int)x_min;
rectangle.Y = (int)y_min;
rectangle.Width = (int)(x_max - x_min);
rectangle.Height = (int)(y_max - y_min);
check_detection = 1;
}
}
}
}
// If no tongue was detected, the model trained on detection crops (CropResize)
// must not be used, so fall back to the older model trained without detection.
if (check_detection == 0)
{
MessageBox.Show("Error: Sorry can not detect any tongue in this image.\nPress [OK] to skip preprocessing.",
"Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
time = DateTime.Now.ToLocalTime().ToString();
// NOTE(review): the log file name contains a trailing space — confirm intended.
File.AppendAllText("DetectionFailedLog.txt ", time + " " + imageFile + "\n");
// Clear all downstream result views for this image.
Invoke((MethodInvoker)delegate
{
pictureBox_detection.Image = null;
pictureBox_detection.Refresh();
pictureBox_cropResized.Image = null;
pictureBox_cropResized.Refresh();
pictureBox_output.Image = null;
pictureBox_output.Refresh();
pictureBox_outputSRG.Image = null;
pictureBox_outputSRG.Refresh();
pictureBox_maskSRG.Image = null;
pictureBox_maskSRG.Refresh();
pictureBox_extraction.Image = null;
pictureBox_extraction.Refresh();
pictureBox_last.Image = null;
pictureBox_last.Refresh();
});
// Segment the whole (uncropped) input with the fallback pix2pix model.
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_input);
byte_inputSegmentation = Bitmap2Byte(bitmap_bitch);
Thread.Sleep(1000);
modelFile = DownloadDefaultModel_noBoxPix2Pix(basepath);
using (var graph = new TFGraph())
{
var model = File.ReadAllBytes(modelFile);
graph.Import(model, "");
using (var session = new TFSession(graph))
{
var tensor = ImageUtil2.CreateTensorFromImageFile(byte_inputSegmentation);
var runner = session.GetRunner();
runner
.AddInput(graph["generator/input_image"][0], tensor)
.Fetch(graph["generator/prediction"][0]);
var output = runner.Run();
float[,,,] resultfloat = (float[,,,])output[0].GetValue(jagged: false);
// Binarize the tanh-style output: negative -> 0 (background), else 255 (tongue).
for (int p = 0; p < 256; p++)
{
for (int q = 0; q < 256; q++)
{
float check = resultfloat[0, p, q, 0];
if (check < 0)
{
mask[ii] = 0;
}
else
{
mask[ii] = 255;
}
ii++;
}
}
}
}
Thread.Sleep(1000);
// Show and save the raw 256x256 segmentation result.
bitmap_bitch = ToGrayBitmap(mask, 256, 256);
mat_outputNoBox = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap_bitch);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputNoBox);
Invoke((MethodInvoker)delegate
{
pictureBox_output.Image = bitmap_bitch;
pictureBox_output.Refresh();
label1.BackColor = Color.White;
label4.BackColor = Color.Red;
});
mat_outputNoBox.SaveImage(basepath + "\\output256" + imageFile);
// Remove small regions via the native DLL (two passes; file-based in/out).
try
{
RemoveSmallRegion(basepath + "\\output256" + imageFile, basepath + "\\output_changed1" + imageFile, 500, 1, 1);
RemoveSmallRegion(basepath + "\\output_changed1" + imageFile, basepath + "\\output_changed2" + imageFile, 500, 0, 0);
}
catch
{
MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
break;
}
mat_outputSRGNoBox = new Mat(basepath + "\\output_changed2" + imageFile, ImreadModes.GrayScale);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputSRGNoBox);
Invoke((MethodInvoker)delegate
{
pictureBox_outputSRG.Image = bitmap_bitch;
pictureBox_outputSRG.Refresh();
label4.BackColor = Color.White;
label5.BackColor = Color.Red;
});
// Scale the 256x256 mask back up to the input size and denoise again.
Cv2.Resize(mat_outputSRGNoBox, mat_mask, mat_input.Size());
mat_mask.SaveImage(basepath + "\\mask" + imageFile);
try
{
RemoveSmallRegion(basepath + "\\mask" + imageFile, basepath + "\\mask_changed1" + imageFile, 500, 1, 1);
RemoveSmallRegion(basepath + "\\mask_changed1" + imageFile, basepath + "\\mask_changed2" + imageFile, 500, 0, 0);
}
catch
{
MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
break;
}
// Re-threshold: the resize interpolation produced non-binary values.
mat_maskSRG = new Mat(basepath + "\\mask_changed2" + imageFile, ImreadModes.GrayScale);
Cv2.Threshold(mat_maskSRG, mat_maskSRG, 128, 255, ThresholdTypes.Binary);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_maskSRG);
Invoke((MethodInvoker)delegate
{
pictureBox_maskSRG.Image = bitmap_bitch;
pictureBox_maskSRG.Refresh();
label5.BackColor = Color.White;
label6.BackColor = Color.Red;
});
// White out non-tongue pixels and count tongue-area pixels.
mat_extraction = mat_input.Clone();
areaCount = 0;
for (int i = 0; i < mat_input.Height; i++)
{
for (int j = 0; j < mat_input.Width; j++)
{
Vec3b pix = mat_extraction.At<Vec3b>(i, j);
// NOTE(review): At<int> on a CV_8UC1 Mat reads 4 bytes per call; it appears
// to work here in practice, but At<byte> would match the element type — verify.
if (mat_maskSRG.At<int>(i, j) == 0)
{
pix[0] = (byte)(255);
pix[1] = (byte)(255);
pix[2] = (byte)(255);
mat_extraction.Set<Vec3b>(i, j, pix);
}
else
{
// Inside the mask: pixel is rewritten with its own value (no-op copy).
pix[0] = (byte)(mat_extraction.At<Vec3b>(i, j).Item0);
pix[1] = (byte)(mat_extraction.At<Vec3b>(i, j).Item1);
pix[2] = (byte)(mat_extraction.At<Vec3b>(i, j).Item2);
mat_extraction.Set<Vec3b>(i, j, pix);
areaCount++;
}
}
}
mat_extraction.SaveImage(basepath + "\\extraction" + imageFile);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_extraction);
Invoke((MethodInvoker)delegate
{
pictureBox_extraction.Image = bitmap_bitch;
pictureBox_extraction.Refresh();
});
label6.BackColor = Color.White;
label7.BackColor = Color.Red;
label7.BackColor = Color.White;
label8.BackColor = Color.Red;
// Save the bounding-box info row to the csv ("None" columns: no box was found).
sw.WriteLine(
imageFile.Substring(1) + ","
+ "None" + "," + "None" + ","
+ "None" + "," + "None" + ","
+ "None" + "," + "None" + "," + areaCount.ToString()
);
// Fallback path finished for this image; move on to the next file.
continue;
}
// Processing when a tongue was successfully detected.
else
{
// Save and display the image with the detected bounding box drawn on it.
mat_drawBox.SaveImage(basepath + "\\detection" + imageFile);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_drawBox);
Invoke((MethodInvoker)delegate
{
pictureBox_detection.Image = bitmap_bitch;
pictureBox_detection.Refresh();
});
label1.BackColor = Color.White;
// Crop to the detected region, resize to 256x256 and display.
label2.BackColor = Color.Red;
for (int i = P1.Y; i < P2.Y; i++)
{
for (int j = P1.X; j < P2.X; j++)
{
// Copy only the box region; everything outside stays at the init value.
Vec3b pix = mat_input.At<Vec3b>(i, j);
mat_cropped.Set<Vec3b>(i, j, pix);
}
}
mat_cropped.SaveImage(basepath + "\\cropped" + imageFile);
// Cut out the detected region as its own Mat.
OpenCvSharp.Size size_roi = new OpenCvSharp.Size();
size_roi.Height = rectangle.Height;
size_roi.Width = rectangle.Width;
roi = new Rect(P1, size_roi);
mat_roisize = mat_input.Clone(roi);
// Resize to 256x256 for segmentation.
Cv2.Resize(mat_roisize, mat_roi, mat_roi256.Size());
mat_roi.SaveImage(basepath + "\\cropresized" + imageFile);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_roi);
Invoke((MethodInvoker)delegate
{
pictureBox_cropResized.Image = bitmap_bitch;
pictureBox_cropResized.Refresh();
});
label2.BackColor = Color.White;
// Run segmentation on the cropped/resized image.
label3.BackColor = Color.Red;
byte_inputSegmentation = Bitmap2Byte(bitmap_bitch);
Thread.Sleep(1000);
modelFile = DownloadDefaultModel(basepath);
using (var graph = new TFGraph())
{
var model = File.ReadAllBytes(modelFile);
graph.Import(model, "");
using (var session = new TFSession(graph))
{
var tensor = ImageUtil2.CreateTensorFromImageFile(byte_inputSegmentation);
var runner = session.GetRunner();
runner
.AddInput(graph["generator/input_image"][0], tensor)
.Fetch(graph["generator/prediction"][0]);
//.AddInput(graph["input_image"][0], tensor)
//.Fetch(graph["generator1/decoder_1/Tanh"][0]);
var output = runner.Run();
float[,,,] resultfloat = (float[,,,])output[0].GetValue(jagged: false);
// Binarize: negative -> 0 (background), else 255 (tongue).
for (int p = 0; p < 256; p++)
{
for (int q = 0; q < 256; q++)
{
float check = resultfloat[0, p, q, 0];
if (check < 0)
{
mask[ii] = 0;
}
else
{
mask[ii] = 255;
}
ii++;
}
}
}
}
GC.Collect();
Thread.Sleep(1000);
// Show the raw 256x256 segmentation result.
bitmap_bitch = ToGrayBitmap(mask, 256, 256);
mat_output = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap_bitch);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_output);
Invoke((MethodInvoker)delegate
{
pictureBox_output.Image = bitmap_bitch;
pictureBox_output.Refresh();
});
label3.BackColor = Color.White;
// Post-process the tongue segmentation result.
label4.BackColor = Color.Red;
// Save the segmentation result.
mat_output.SaveImage(basepath + "\\output256" + imageFile);
// Remove noise via post-processing (region growing, native DLL, two passes).
try
{
RemoveSmallRegion(basepath + "\\output256" + imageFile, basepath + "\\output_changed1" + imageFile, 500, 1, 1);
RemoveSmallRegion(basepath + "\\output_changed1" + imageFile, basepath + "\\output_changed2" + imageFile, 500, 0, 0);
}
catch
{
MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
break;
}
mat_outputSRG = new Mat(basepath + "\\output_changed2" + imageFile, ImreadModes.GrayScale);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputSRG);
Invoke((MethodInvoker)delegate
{
pictureBox_outputSRG.Image = bitmap_bitch;
pictureBox_outputSRG.Refresh();
});
label4.BackColor = Color.White;
label5.BackColor = Color.Red;
// Resize the segmentation result to the bounding-box size.
// NOTE: the image apparently stops being strictly binary here (interpolation).
Cv2.Resize(mat_outputSRG, mat_outputChanged, mat_roisize.Size());
mat_outputChanged.SaveImage(basepath + "\\output_resized" + imageFile);
// Copy the box-sized mask back into the detected region of a full-size mask.
mat_mask = new Mat(mat_input.Size(), MatType.CV_8UC1, 1);
for (int i = P1.Y; i < P2.Y; i++)
{
for (int j = P1.X; j < P2.X; j++)
{
// mmp/pmm are the row/column cursors inside mat_outputChanged (class fields).
// NOTE(review): At<int>/Set<int> on a CV_8UC1 Mat access 4 bytes per element;
// appears to work here, but <byte> would match the element type — verify.
int pix = mat_outputChanged.At<int>(mmp, pmm);
mat_mask.Set<int>(i, j, pix);
pmm++;
}
mmp++;
pmm = 0;
}
mmp = 0;
Cv2.Resize(mat_mask, mat_mask, mat_input.Size());
mat_mask.SaveImage(basepath + "\\mask" + imageFile);
// Noise removal on the full-size mask.
try
{
RemoveSmallRegion(basepath + "\\mask" + imageFile, basepath + "\\mask_changed1" + imageFile, 500, 1, 1);
RemoveSmallRegion(basepath + "\\mask_changed1" + imageFile, basepath + "\\mask_changed2" + imageFile, 500, 0, 0);
}
catch
{
MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
break;
}
mat_maskSRG = new Mat(basepath + "\\mask_changed2" + imageFile, ImreadModes.GrayScale);
Cv2.Threshold(mat_maskSRG, mat_maskSRG, 128, 255, ThresholdTypes.Binary);
// Final binary mask.
mat_maskSRG.SaveImage(basepath + "\\mask_final" + imageFile);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_maskSRG);
Invoke((MethodInvoker)delegate
{
pictureBox_maskSRG.Image = bitmap_bitch;
pictureBox_maskSRG.Refresh();
});
label5.BackColor = Color.White;
// Combine the original image with the mask to extract the tongue region.
label6.BackColor = Color.Red;
mat_extraction = mat_input.Clone();
areaCount = 0;
for (int i = 0; i < mat_input.Height; i++)
{
for (int j = 0; j < mat_input.Width; j++)
{
Vec3b pix = mat_extraction.At<Vec3b>(i, j);
// NOTE(review): At<int> on CV_8UC1 — same caveat as above.
if (mat_maskSRG.At<int>(i, j) == 0)
{
pix[0] = (byte)(255);
pix[1] = (byte)(255);
pix[2] = (byte)(255);
mat_extraction.Set<Vec3b>(i, j, pix);
}
else
{
// Inside the mask: pixel is rewritten with its own value (no-op copy).
pix[0] = (byte)(mat_extraction.At<Vec3b>(i, j).Item0);
pix[1] = (byte)(mat_extraction.At<Vec3b>(i, j).Item1);
pix[2] = (byte)(mat_extraction.At<Vec3b>(i, j).Item2);
mat_extraction.Set<Vec3b>(i, j, pix);
areaCount++;
}
}
}
mat_extraction.SaveImage(basepath + "\\extraction" + imageFile);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_extraction);
Invoke((MethodInvoker)delegate
{
pictureBox_extraction.Image = bitmap_bitch;
pictureBox_extraction.Refresh();
});
label6.BackColor = Color.White;
// Gloss extraction step.
// The extraction result is not used here (handling (255,255,255) is cumbersome).
label7.BackColor = Color.Red;
label7.BackColor = Color.White;
// Log the processed image.
label8.BackColor = Color.Red;
time = DateTime.Now.ToLocalTime().ToString();
// NOTE(review): the log file name contains a trailing space — confirm intended.
File.AppendAllText("Log.txt ", time + " " + imageFile + " Done!\n");
// Save the bounding-box info row to the csv.
sw.WriteLine(
imageFile.Substring(1) + ","
+ P1.X.ToString() + "," + P1.Y.ToString() + ","
+ P2.X.ToString() + "," + P2.Y.ToString() + ","
+ Math.Abs(P1.X - P2.X).ToString() + "," + Math.Abs(P1.Y - P2.Y).ToString() + ","
+ areaCount.ToString() + ","
);
}
GC.Collect();
}
MessageBox.Show("Finished!");
// Batch done: restore the idle UI state.
Invoke((MethodInvoker)delegate
{
button_start.Enabled = true;
button_pause.Enabled = false;
label_processingFileName.Text = "Processing File: None";
});
}
}
private void BackgroundWorker1_DoWork_ImageandCalib(object sender, DoWorkEventArgs e)
{
// boundingboxなどのinfo出力用
sw = new StreamWriter(fileName_info, false, System.Text.Encoding.GetEncoding("shift_jis"));
// 出力用csvの準備
sw.Write(
"image" + "," +
"top left X" + "," + "top left Y" + "," + "bottom right X" + "," + "bottom right Y" + ","
);
for (int i = 0; i < 1; i++)
for (int j = 0; j < 5; j++)
sw.Write("x" + j.ToString() + "," + "y" + j.ToString() + ",");
for (int i = 0; i < 1; i++)
for (int j = 0; j < 8; j++)
sw.Write("x" + j.ToString() + "," + "y" + j.ToString() + ",");
sw.WriteLine();
sw.Close();
using (MemoryStream ms = new MemoryStream())
{
// ディレクトリglobの取得
var glob_dir = Directory.GetDirectories(@"data\errors");
for (int a = 0; a < glob_dir.Length; a++)
{
manualReset.WaitOne();
// 画像と校正ファイルのパス
var glob_file = Directory.GetFiles(glob_dir[a]);
var path_calib = glob_file.Where(n => n.Contains("csv")).ToList()[0];
var path_image = glob_file.Where(n => n.Contains("bmp") || n.Contains("png")).ToList()[0];
var path_base = Directory.GetCurrentDirectory();
var name_image = Path.GetFileName(path_image);
var name_dir = Path.GetFileName(glob_dir[a]);
// ステータスの表示
Invoke((MethodInvoker)delegate
{
label_processingFileName.Text = "Processing File: " + "\\" + name_dir;
count = a + 1;
label_totalProgress.Text = "Total Progress: " + count + "/" + glob_dir.Length;
});
// 入力画像読み込み・表示
mat_input = Cv2.ImRead(path_image, ImreadModes.Color);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_input);
Invoke((MethodInvoker)delegate
{
pictureBox_input.Image = bitmap_bitch;
pictureBox_input.Refresh();
});
// Detection
label8.BackColor = Color.White;
label1.BackColor = Color.Red;
mat_drawBox = mat_input.Clone();
mat_cropped = new Mat(mat_input.Size(), MatType.CV_8UC3, 1);
byte_inputDetection = Bitmap2Byte(bitmap_bitch);
using (var graph = new TFGraph())
{
var model = File.ReadAllBytes(path_base + "/Detection_Normal.pb");
graph.Import(model, "");
using (var session = new TFSession(graph))
{
var tensor = ImageUtil.CreateTensorFromImageFile(byte_inputDetection, TFDataType.UInt8);
var runner = session.GetRunner();
runner
.AddInput(graph["image_tensor"][0], tensor)
.Fetch("detection_boxes", "detection_scores", "detection_classes", "num_detections");
var output = runner.Run();
var boxes = (float[,,])output[0].GetValue();
var scores = (float[,])output[1].GetValue();
var classes = (float[,])output[2].GetValue();
var detections = (float[])output[3].GetValue();
check_detection = 0;
max_score = 0;
for (int i = 0; i < scores.Length; i++)
{
if ((scores[0, i] > 0.5) && (scores[0, i] > max_score))
{
max_score = scores[0, i];
float y_min = boxes[0, i, 0] * (float)bitmap_bitch.Height;
float x_min = boxes[0, i, 1] * (float)bitmap_bitch.Width;
float y_max = boxes[0, i, 2] * (float)bitmap_bitch.Height;
float x_max = boxes[0, i, 3] * (float)bitmap_bitch.Width;
P1.X = (int)x_min;
P1.Y = (int)y_min;
P2.X = (int)x_max;
P2.Y = (int)y_max;
Cv2.Rectangle(mat_drawBox, P1, P2, new Scalar(0, 255, 0), 5);
rectangle.X = (int)x_min;
rectangle.Y = (int)y_min;
rectangle.Width = (int)(x_max - x_min);
rectangle.Height = (int)(y_max - y_min);
check_detection = 1;
}
}
}
}
// Detection結果表示・保存
mat_drawBox.SaveImage(path_base + "\\detection" + "\\" + name_dir + ".bmp");
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_drawBox);
Invoke((MethodInvoker)delegate
{
pictureBox_detection.Image = bitmap_bitch;
pictureBox_detection.Refresh();
});
label1.BackColor = Color.White;
// 舌検出領域でcrop
label2.BackColor = Color.Red;
for (int i = P1.Y; i < P2.Y; i++)
{
for (int j = P1.X; j < P2.X; j++)
{
Vec3b pix = mat_input.At<Vec3b>(i, j);
mat_cropped.Set<Vec3b>(i, j, pix);
}
}
mat_cropped.SaveImage(path_base + "\\cropped" + "\\" + name_dir + ".bmp");
// 舌検出領域でresize
OpenCvSharp.Size size_roi = new OpenCvSharp.Size();
size_roi.Height = rectangle.Height;
size_roi.Width = rectangle.Width;
roi = new Rect(P1, size_roi);
mat_roisize = mat_input.Clone(roi);
Cv2.Resize(mat_roisize, mat_roi, mat_roi256.Size());
mat_roi.SaveImage(path_base + "\\cropresized" + "\\" + name_dir + ".bmp");
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_roi);
Invoke((MethodInvoker)delegate
{
pictureBox_cropResized.Image = bitmap_bitch;
pictureBox_cropResized.Refresh();
});
label2.BackColor = Color.White;
// Segmenation
label3.BackColor = Color.Red;
byte_inputSegmentation = Bitmap2Byte(bitmap_bitch);
Thread.Sleep(1000);
modelFile = DownloadDefaultModel(path_base);
ii = 0;
using (var graph = new TFGraph())
{
var model = File.ReadAllBytes(modelFile);
graph.Import(model, "");
using (var session = new TFSession(graph))
{
var tensor = ImageUtil2.CreateTensorFromImageFile(byte_inputSegmentation);
var runner = session.GetRunner();
runner
//.AddInput(graph["generator/input_image"][0], tensor)
//.Fetch(graph["generator/prediction"][0]);
.AddInput(graph["input_image"][0], tensor)
.Fetch(graph["generator1/decoder_1/Tanh"][0]);
var output = runner.Run();
float[,,,] resultfloat = (float[,,,])output[0].GetValue(jagged: false);
for (int p = 0; p < 256; p++)
{
for (int q = 0; q < 256; q++)
{
float check = resultfloat[0, p, q, 0];
if (check < 0)
{
mask[ii] = 0;
}
else
{
mask[ii] = 255;
}
ii++;
}
}
}
}
GC.Collect();
Thread.Sleep(1000);
// segmentation結果表示・保存
bitmap_bitch = ToGrayBitmap(mask, 256, 256);
// Round-trip the raw network output through a Bitmap, then show it on the UI thread.
mat_output = OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap_bitch);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_output);
Invoke((MethodInvoker)delegate
{
pictureBox_output.Image = bitmap_bitch;
pictureBox_output.Refresh();
});
// Progress indicator: step 3 done, step 4 active.
label3.BackColor = Color.White;
label4.BackColor = Color.Red;
mat_output.SaveImage(path_base + "\\output256" + "\\" + name_dir + ".bmp");
// Post-processing (region-growing) noise removal, iteration 2.
// The native DLL works file-to-file: pass 1 fills holes, pass 2 removes small blobs.
try
{
RemoveSmallRegion(path_base + "\\output256" + "\\" + name_dir + ".bmp", path_base + "\\output_changed1" + "\\" + name_dir + ".bmp", 500, 1, 1);
RemoveSmallRegion(path_base + "\\output_changed1" + "\\" + name_dir + ".bmp", path_base + "\\output_changed2" + "\\" + name_dir + ".bmp", 500, 0, 0);
}
catch
{
// Missing/unloadable native DLL: report and abort the processing loop.
MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
break;
}
// Reload the cleaned 256x256 segmentation result as grayscale and display it.
mat_outputSRG = new Mat(path_base + "\\output_changed2" + "\\" + name_dir + ".bmp", ImreadModes.GrayScale);
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_outputSRG);
Invoke((MethodInvoker)delegate
{
pictureBox_outputSRG.Image = bitmap_bitch;
pictureBox_outputSRG.Refresh();
});
label4.BackColor = Color.White;
label5.BackColor = Color.Red;
// Resize back to the bounding-box size.
Cv2.Resize(mat_outputSRG, mat_outputChanged, mat_roisize.Size());
mat_outputChanged.SaveImage(path_base + "\\output_resized" + "\\" + name_dir + ".bmp");
// Build a mask the same size as the input (occasionally unstable: the resize may
// slightly change the size and cause a range overrun).
// Placebo — sleep/GC in the hope of stabilizing the above.
// NOTE(review): GC.Collect() + Thread.Sleep are band-aids, not fixes; the real
// guard is the bounds checks in the copy loop below — TODO confirm and remove.
Thread.Sleep(100);
GC.Collect();
mat_mask = new Mat(mat_input.Size(), MatType.CV_8UC1, 0);
// Copy the resized segmentation into the full-size mask at the bounding-box
// position (P1..P2), clamping to the segmentation's actual dimensions.
var y_mask = 0;
for (int y = P1.Y; y < P2.Y; y++)
{
if (y_mask >= mat_outputChanged.Height)
break;
var x_mask = 0;
for (int x = P1.X; x < P2.X; x++)
{
if (x_mask >= mat_outputChanged.Width)
break;
// NOTE(review): At<int>/Set<int> on CV_8UC1 mats reads/writes 4 bytes per
// access instead of 1; this "works" only by accident of memory layout.
// Should presumably be At<byte>/Set<byte> — verify before changing.
int pix = mat_outputChanged.At<int>(y_mask, x_mask);
mat_mask.Set<int>(y, x, pix);
x_mask++;
}
y_mask++;
}
mmp = 0;
// Resize is a no-op when mat_mask already matches the input size; kept as a safety net.
Cv2.Resize(mat_mask, mat_mask, mat_input.Size());
mat_mask.SaveImage(path_base + "\\mask" + "\\" + name_dir + ".bmp");
// Noise removal on the full-size mask (same two-pass DLL call as above).
try
{
RemoveSmallRegion(path_base + "\\mask" + "\\" + name_dir + ".bmp", path_base + "\\mask_changed1" + "\\" + name_dir + ".bmp", 500, 1, 1);
RemoveSmallRegion(path_base + "\\mask_changed1" + "\\" + name_dir + ".bmp", path_base + "\\mask_changed2" + "\\" + name_dir + ".bmp", 500, 0, 0);
}
catch
{
MessageBox.Show("Error: Unable to reprocess! Please check is there [RemoveSmallRegionDLL.dll] file in floder?", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
break;
}
// Binarize the cleaned mask (anything >= 128 becomes 255).
mat_maskSRG = new Mat(path_base + "\\mask_changed2" + "\\" + name_dir + ".bmp", ImreadModes.GrayScale);
Cv2.Threshold(mat_maskSRG, mat_maskSRG, 128, 255, ThresholdTypes.Binary);
// Final binary mask result.
mat_maskSRG.SaveImage(path_base + "\\mask_final" + "\\" + name_dir + ".bmp");
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_maskSRG);
Invoke((MethodInvoker)delegate
{
pictureBox_maskSRG.Image = bitmap_bitch;
pictureBox_maskSRG.Refresh();
});
label5.BackColor = Color.White;
// Combine the original image with the mask to extract the tongue region.
// TODO: move to OpenCV-based mask processing and area computation.
label6.BackColor = Color.Red;
mat_extraction = mat_input.Clone();
// Count the number of tongue pixels while whitening the background.
areaCount = 0;
for (int i = 0; i < mat_input.Height; i++)
{
for (int j = 0; j < mat_input.Width; j++)
{
Vec3b pix = mat_extraction.At<Vec3b>(i, j);
// NOTE(review): At<int> on a CV_8UC1 mask reads 4 bytes; At<byte> is
// presumably intended here as well — verify.
if (mat_maskSRG.At<int>(i, j) == 0)
{
// Outside the mask: paint white.
pix[0] = (byte)(255);
pix[1] = (byte)(255);
pix[2] = (byte)(255);
mat_extraction.Set<Vec3b>(i, j, pix);
}
else
{
// Inside the mask: keep the original pixel (this re-write is a no-op)
// and count it toward the tongue area.
pix[0] = (byte)(mat_extraction.At<Vec3b>(i, j).Item0);
pix[1] = (byte)(mat_extraction.At<Vec3b>(i, j).Item1);
pix[2] = (byte)(mat_extraction.At<Vec3b>(i, j).Item2);
mat_extraction.Set<Vec3b>(i, j, pix);
areaCount++;
}
}
}
mat_extraction.SaveImage(path_base + "\\extraction" + "\\" + name_dir + ".bmp");
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_extraction);
Invoke((MethodInvoker)delegate
{
pictureBox_extraction.Image = bitmap_bitch;
pictureBox_extraction.Refresh();
});
label6.BackColor = Color.White;
/// Color extraction via the 5-point click method.
var path_colorMatrixXYZ = "xyz.txt";
// Class for color extraction.
ColorExtractor ce = new ColorExtractor();
// Create the masked tongue-region image.
Mat mat_finalMask = mat_maskSRG.Clone();
Mat mat_maskedImg = new Mat();
mat_input.CopyTo(mat_maskedImg, mat_finalMask);
// 5-point method (Ishikawa, 2010).
List<OpenCvSharp.Point> list_5points_3 = ce.Get5points(mat_finalMask, ColorExtractor.FivePointMethod.Method3);
// Obtain the 8 regions.
List<OpenCvSharp.Point> list_8area_3 = ce.Get8area(list_5points_3);
// Display the regions.
var mat_areaDicision = ce.ShowResult(mat_input.Clone(), list_5points_3, list_8area_3);
// Color extraction.
List<OpenCvSharp.Scalar> list_8Bgr = ce.Get8colors(mat_maskedImg, list_8area_3);
// Color conversion (RGB->XYZ->Lab).
List<OpenCvSharp.Scalar> list_8Lab = ce.Calc8Lab(list_8Bgr, path_calib, path_colorMatrixXYZ);
// Save per-region colors as CSV (truncate any existing file first).
string CSVfilename = path_base + "\\color" + "\\" + name_dir + ".csv";
FileStream CSV_file = File.Open(CSVfilename, FileMode.OpenOrCreate, FileAccess.Write);
CSV_file.Seek(0, SeekOrigin.Begin);
CSV_file.SetLength(0);
CSV_file.Close();
StreamWriter CSV_data = new StreamWriter(CSVfilename);
// NOTE(review): header ends "L,a,B" — the last column is presumably Lab "b"
// (lowercase), duplicated with the RGB "B" column. Confirm before fixing,
// as downstream tools may parse this exact header.
CSV_data.WriteLine("Area,R,G,B,L,a,B");
for (int i = 0; i < list_8Bgr.Count(); i++)
{
string str = (i + 1).ToString() + ",";
// Scalar is BGR-ordered, so Val2/Val1/Val0 emits R,G,B.
str +=
list_8Bgr[i].Val2.ToString("0.0000") + "," +
list_8Bgr[i].Val1.ToString("0.0000") + "," +
list_8Bgr[i].Val0.ToString("0.0000") + "," +
list_8Lab[i].Val0.ToString("0.0000") + "," +
list_8Lab[i].Val1.ToString("0.0000") + "," +
list_8Lab[i].Val2.ToString("0.0000");
CSV_data.WriteLine(str);
}
CSV_data.Close();
// Dispose.
mat_finalMask.Dispose();
mat_maskedImg.Dispose();
GC.Collect();
System.Threading.Thread.Sleep(100);
mat_areaDicision.SaveImage(path_base + "\\autoAreaDecision" + "\\" + name_dir + ".bmp");
bitmap_bitch = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat_areaDicision);
Invoke((MethodInvoker)delegate
{
pictureBox_last.Image = bitmap_bitch;
pictureBox_last.Refresh();
});
label7.BackColor = Color.White;
// Processing log.
label8.BackColor = Color.Red;
time = DateTime.Now.ToLocalTime().ToString();
// NOTE(review): the log filename "Log.txt " has a trailing space — likely a typo;
// Windows trims it, but it is fragile. Confirm before changing.
File.AppendAllText("Log.txt ", time + " " + "\\" + name_dir + ".bmp" + " Done!\n");
// Write info output: file name, bounding box, 5 points, 8 region points.
sw = new StreamWriter(fileName_info, true, System.Text.Encoding.GetEncoding("shift_jis"));
sw.Write(
name_dir.ToString() + ","
// bounding box
+ P1.X.ToString() + "," + P1.Y.ToString() + ","
+ P2.X.ToString() + "," + P2.Y.ToString() + ","
// area
//+ areaCount.ToString() + ","
);
foreach (var n in list_5points_3)
sw.Write(n.X + "," + n.Y + ",");
foreach (var n in list_8area_3)
sw.Write(n.X + "," + n.Y + ",");
sw.Write("\n");
sw.Close();
// Dispose.
}
// All files processed: final cleanup and re-enable the UI.
GC.Collect();
MessageBox.Show("Finished!");
Invoke((MethodInvoker)delegate
{
button_start.Enabled = true;
button_pause.Enabled = false;
label_processingFileName.Text = "Processing File: None";
});
}
}
/// <summary>
/// Toggles the worker thread between paused and running states.
/// The shared ManualResetEvent gates the processing loop: Reset() blocks it,
/// Set() releases it. The button caption tracks the current state.
/// </summary>
private void Button_pause_Click(object sender, EventArgs e)
{
    bool requestPause = button_pause.Text == "Pause";
    if (requestPause)
    {
        // Block the worker at its next wait point and offer "Continue".
        manualReset.Reset();
        button_pause.Text = "Continue";
    }
    else
    {
        // Release the worker and offer "Pause" again.
        manualReset.Set();
        button_pause.Text = "Pause";
    }
}
/// <summary>
/// Ensures the info-file StreamWriter is released when the form closes,
/// so a mid-run shutdown does not leave the output file locked.
/// </summary>
private void Form1_FormClosing(object sender, FormClosingEventArgs e)
{
    Console.WriteLine("file closing");
    // Null-conditional call: equivalent to the explicit null check,
    // and StreamWriter.Close is safe to call more than once.
    sw?.Close();
    Console.WriteLine("file closed");
}
// Designer-wired handler for comboBox1 selection changes.
// Intentionally empty: no behavior is attached to this event yet.
private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
{
}
}
}