// opticalFlowProcessing / IRImageProcessing / Source.cpp
#include "myOpenCV3.h"

#include <bitset>
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

/// One sub-iteration of the Guo-Hall thinning algorithm.
/// img must be CV_8UC1 with foreground pixels == 1 and background == 0
/// (the caller, thinning(), divides by 255 first).
/// iteration selects which of the two alternating sub-passes to run (0 or 1).
void thinningIteration(cv::Mat& img, int iteration) {
    // Pixels to keep; entries cleared to 0 mark pixels deleted this pass.
    cv::Mat marker = cv::Mat::ones(img.size(), CV_8UC1);

    // Skip the 1-pixel border so the 8-neighbourhood stays in bounds.
    for (int y = 1; y < img.rows - 1; ++y) {
        for (int x = 1; x < img.cols - 1; ++x) {
            // 8-neighbourhood, clockwise from the pixel above (Guo-Hall naming).
            uchar p2 = img.at<uchar>(y - 1, x);
            uchar p3 = img.at<uchar>(y - 1, x + 1);
            uchar p4 = img.at<uchar>(y, x + 1);
            uchar p5 = img.at<uchar>(y + 1, x + 1);
            uchar p6 = img.at<uchar>(y + 1, x);
            uchar p7 = img.at<uchar>(y + 1, x - 1);
            uchar p8 = img.at<uchar>(y, x - 1);
            uchar p9 = img.at<uchar>(y - 1, x - 1);

            // C: connectivity number; N1/N2: the two neighbour counts over
            // diagonal pairs; m: sub-pass-specific deletion condition.
            int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) +
                (!p6 & (p7 | p8)) + (!p8 & (p9 | p2));
            int N1 = (p9 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8);
            int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p9);
            // Guo-Hall uses N = min(N1, N2); the original's "N1 < 3 ? N1 : N2"
            // was a transcription typo of "N1 < N2 ? N1 : N2".
            int N = N1 < N2 ? N1 : N2;
            int m = iteration == 0 ? ((p6 | p7 | (!p9)) & p8) : ((p2 | p3 | (!p5)) & p4);

            // Delete the pixel when all three Guo-Hall conditions hold.
            // (Original mixed a bitwise '&' into the boolean test, which only
            // worked because '==' binds tighter than '&'.)
            if (C == 1 && N >= 2 && N <= 3 && m == 0)
                marker.at<uchar>(y, x) = 0;
        }
    }
    img &= marker;
}

/// Guo-Hall thinning: reduces the white (255) regions of the binary image
/// src to a one-pixel-wide skeleton, stored in dst (0/255, CV_8UC1).
/// Iterates the two alternating sub-passes until a full pass changes nothing.
void thinning(const cv::Mat& src, cv::Mat& dst) {
    dst = src.clone();
    dst /= 255;  // work on 0/1 values, as thinningIteration expects

    // (Removed: an unused cv::normalize(src, ...) call — its result was
    // never read anywhere.)
    cv::Mat prev = cv::Mat::zeros(dst.size(), CV_8UC1);
    cv::Mat diff;

    // Repeat both sub-iterations until no pixel is deleted in a full pass.
    do {
        thinningIteration(dst, 0);
        thinningIteration(dst, 1);
        cv::absdiff(dst, prev, diff);
        dst.copyTo(prev);
    } while (cv::countNonZero(diff) > 0);

    dst *= 255;  // back to 0/255 for display and saving
}
/// Scans the interior of img and returns every candidate pixel whose
/// 8-neighbourhood contains three or more non-zero pixels.
/// NOTE(review): the guard skips pixels equal to 255, so only NON-white
/// pixels are tested as centres — the original (Japanese) comment suggested
/// "ignore pixels that are not white/black", so confirm the intended
/// foreground polarity before relying on this (currently unused) helper.
std::vector<cv::Point> findBranchPoints(cv::Mat& img)
{
    std::vector<cv::Point> result;

    // 8-neighbourhood offsets as {row, col} pairs.
    const int offsets[8][2] = {
        {-1, -1}, {-1, 0}, {-1, 1},
        { 0, -1},          { 0, 1},
        { 1, -1}, { 1, 0}, { 1, 1}
    };

    for (int row = 1; row < img.rows - 1; ++row)
    {
        for (int col = 1; col < img.cols - 1; ++col)
        {
            // Same guard as before: white pixels are not candidates.
            if (img.at<uchar>(row, col) == 255) continue;

            // Count non-zero pixels in the 8-neighbourhood.
            int nonZeroNeighbors = 0;
            for (const auto& off : offsets)
                if (img.at<uchar>(row + off[0], col + off[1]) > 0)
                    ++nonZeroNeighbors;

            // Three or more set neighbours => record as a branch point
            // (OpenCV points are (x, y) = (col, row)).
            if (nonZeroNeighbors >= 3)
                result.emplace_back(col, row);
        }
    }

    return result;
}

/// Detects bifurcation (branch) points on a thinned, one-pixel-wide skeleton
/// (foreground == 255) using the crossing-number method: a skeleton pixel
/// from which three independent branches leave is a bifurcation.
/// Returns the points in OpenCV order (x = column, y = row).
std::vector<cv::Point> extractBifurcationPoints(cv::Mat& thinnedImage) {
    int rows = thinnedImage.rows;
    int cols = thinnedImage.cols;
    // 8-neighbourhood offsets, clockwise, starting above the pixel.
    int neighbor[2][8] = { {-1, -1, 0, 1, 1, 1, 0, -1}, {0, 1, 1, 1, 0, -1, -1, -1} };
    std::vector<cv::Point> bifurcationPoints;

    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            if (thinnedImage.at<uchar>(i, j) == 255) {
                // Count 255 -> 0 steps around the ring; this equals the
                // number of separate foreground runs (the crossing number).
                int transitions = 0;

                for (int k = 0; k < 8; k++) {
                    int x = i + neighbor[0][k];
                    int y = j + neighbor[1][k];

                    int nextX = i + neighbor[0][(k + 1) % 8];
                    int nextY = j + neighbor[1][(k + 1) % 8];

                    if (thinnedImage.at<uchar>(x, y) == 255 &&
                        thinnedImage.at<uchar>(nextX, nextY) == 0)
                        transitions++;
                }

                // Crossing number 3 == three independent branches -> bifurcation.
                // (The original required transitions == 1 with exactly three
                // neighbours, which matches a contiguous 3-pixel run — an edge
                // pixel of a thick line, not a branch point.)
                if (transitions == 3)
                    bifurcationPoints.push_back(cv::Point(j, i));  // (x, y) = (col, row)
            }
        }
    }

    return bifurcationPoints;
}

/// Returns the photographic negative of src (each pixel becomes 255 - value).
cv::Mat inverseImage(const cv::Mat& src)
{
    cv::Mat result = 255 - src;
    return result;
}

/// Per-element maximum of two images of identical size and type.
cv::Mat maximumImage(const cv::Mat& img1, const cv::Mat& img2) {
    // cv::max allocates/overwrites the output itself, so the original's
    // img1.clone() initialisation was a wasted full-image copy.
    cv::Mat maxImage;
    cv::max(img1, img2, maxImage);
    return maxImage;
}

/// Binarises src, cleans it with morphology, thins it to a skeleton, detects
/// branch points on the skeleton, and writes every intermediate image into
/// dirName. ori is the original (display) image the branch points are drawn
/// on. Saved binary images are inverted (black features on white) for viewing.
void BinaryProcessing(const cv::Mat& ori, const cv::Mat& src, std::string dirName)
{
    // Otsu binarisation (saved for comparison only; pipeline uses adaptive).
    cv::Mat otsuBinaryImage;
    cv::threshold(src, otsuBinaryImage, 0, 255, cv::THRESH_BINARY_INV | cv::THRESH_OTSU);

    // Adaptive binarisation — the branch the rest of the pipeline uses.
    cv::Mat adaptiveBinaryImage;
    cv::adaptiveThreshold(src, adaptiveBinaryImage, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY_INV, 41, 10);

    // Morphological opening to remove small speckle noise.
    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::Mat morphologyImage;
    cv::morphologyEx(adaptiveBinaryImage, morphologyImage, cv::MORPH_OPEN, element);

    // (Removed dead code: a border-strip/copyMakeBorder pair and two unused
    // locals whose results were never read.)

    // Thin the cleaned binary image down to a one-pixel skeleton.
    cv::Mat thinnedImage;
    thinning(morphologyImage, thinnedImage);

    // Dilate the skeleton so it is clearly visible when saved.
    cv::Mat outputImage;
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
    cv::dilate(thinnedImage, outputImage, kernel, cv::Point(-1, -1), 2);

    // Detect branch points while the skeleton is still white-on-black.
    // The original inverted the image FIRST, which made the 255-foreground
    // test inside extractBifurcationPoints scan the background instead.
    std::vector<cv::Point> branchPoints = extractBifurcationPoints(thinnedImage);

    // Invert for saving (black features on a white background).
    cv::bitwise_not(adaptiveBinaryImage, adaptiveBinaryImage);
    cv::bitwise_not(morphologyImage, morphologyImage);
    cv::bitwise_not(outputImage, outputImage);
    cv::bitwise_not(thinnedImage, thinnedImage);

    cv::imwrite(dirName + "/OtsuBinaryImage.png", otsuBinaryImage);
    cv::imwrite(dirName + "/AdaptiveBinary.png", adaptiveBinaryImage);
    cv::imwrite(dirName + "/Morpho.png", morphologyImage);
    cv::imwrite(dirName + "/Thinnig.png", thinnedImage);
    cv::imwrite(dirName + "/DilatedThinning.png", outputImage);

    // Draw the detected branch points on the original image.
    cv::Mat circleImage;
    ori.convertTo(circleImage, CV_8U);
    cv::Mat circleImageRGB;
    cv::cvtColor(circleImage, circleImageRGB, cv::COLOR_GRAY2BGR);  // grayscale -> BGR

    for (const auto& point : branchPoints) {
        // Radius-10 red circle around each branch point.
        cv::circle(circleImageRGB, point, 10, cv::Scalar(0, 0, 255), 2);
    }

    cv::imwrite(dirName + "/Circle.png", circleImageRGB);
}

/// Linearly rescales src16U so its full value range maps onto [0, limit]
/// and stores the result as an 8-bit image in dst8U.
/// dirName is unused here but kept for interface compatibility (a debug
/// imwrite that used it is commented out).
void WindowProcessing(const cv::Mat& src16U, cv::Mat& dst8U, double limit, std::string dirName)
{
    double amin, amax;
    cv::minMaxLoc(src16U, &amin, &amax);

    // Guard against a flat image: amax == amin would divide by zero.
    double range = amax - amin;
    double scale = range > 0.0 ? limit / range : 0.0;

    // dst = (src - amin) * scale, saturated and converted to 8 bit.
    src16U.convertTo(dst8U, CV_8U, scale, -amin * scale);
    //cv::imwrite(dirName + "/Windowed_2.png", dst8U);
}

/// Applies a bank of Gabor filters (`division` orientations spread over 180
/// degrees) to src, saves each response, its 8-bit version and its kernel
/// into dirName, and merges all responses with a per-pixel maximum.
/// ori is currently unused; it is kept for interface compatibility with the
/// commented-out BinaryProcessing call.
void GaborProcessing(const cv::Mat& ori, cv::Mat& src, std::string dirName)
{
    int division = 16;   // number of filter orientations
    int ksize = 51;      // Gabor kernel size in pixels
    double sigma = 18;   // Gaussian envelope standard deviation
    double theta;        // orientation in radians, set per iteration
    double lambda = 42;  // sinusoid wavelength
    double gamma = 1;    // spatial aspect ratio
    double psi = 0;      // phase offset
    double angle = 0;    // orientation in degrees

    cv::Mat src64F;
    src.convertTo(src64F, CV_64F);
    cv::Mat margeImage = cv::Mat::zeros(src64F.size(), CV_64F);  // running max
    cv::Mat marge8UImage;

    for (int i = 0; i < division; i++, angle += (180 / division))
    {
        theta = angle * CV_PI / 180.0;
        cv::Mat kern = cv::getGaborKernel(cv::Size(ksize, ksize), sigma, theta, lambda, gamma, psi, CV_64F);
        kern /= cv::sum(kern)[0];  // normalise so the response keeps the input's scale

        cv::Mat gaborImage, gabor8UImage;
        cv::filter2D(src64F, gaborImage, CV_64F, kern);

        // Range of this orientation's response, used for 8-bit scaling below.
        double minVal, maxVal;
        cv::minMaxLoc(gaborImage, &minVal, &maxVal);

        // Keep the strongest response seen so far across all orientations.
        margeImage = maximumImage(gaborImage, margeImage);

        gaborImage.convertTo(gabor8UImage, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));
        // NOTE(review): marge8UImage is scaled with the CURRENT orientation's
        // min/max rather than its own range — confirm this is intended.
        margeImage.convertTo(marge8UImage, CV_8U, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));

        // Normalise the kernel to 0-255 and convert to 8 bit for saving.
        double kernMinVal, kernMaxVal;
        cv::minMaxLoc(kern, &kernMinVal, &kernMaxVal);
        kern = 255 * (kern - kernMinVal) / (kernMaxVal - kernMinVal);
        cv::Mat kern8U;
        kern.convertTo(kern8U, CV_8U);

        // (Removed: a convertScaleAbs call on a never-assigned diff image,
        // plus unused max-tracking locals — all leftovers of commented-out
        // experiments.)

        std::string fileStr = std::to_string(i) + "_over_" + std::to_string(division) + "pi";
        cv::imwrite(dirName + "/Gabor_" + fileStr + ".png", gaborImage);
        cv::imwrite(dirName + "/Gabor8U_" + fileStr + ".png", gabor8UImage);

        // Save the 8-bit kernel. The original wrote the CV_64F `kern` here
        // even though kern8U had just been computed for exactly this purpose.
        cv::imwrite(dirName + "/Kernel_" + fileStr + ".png", kern8U);
    }

    // Otsu-binarise the merged 8-bit response and save everything.
    cv::Mat margeOtsuBinaryImage;
    cv::threshold(marge8UImage, margeOtsuBinaryImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::imwrite(dirName + "/MargeOtsuBinaryImage.png", margeOtsuBinaryImage);
    cv::imwrite(dirName + "/Marge.png", margeImage);
    cv::imwrite(dirName + "/Marge8U.png", marge8UImage);
    //BinaryProcessing(ori, margeImage, dirName);
}





/// Entry point: reads trimmed.tif, runs the preprocessing chain (windowing,
/// blur, CLAHE, inversion, Gabor bank) and writes all results into a fresh
/// numbered directory under output/.
int main() {
    std::string inputFileName = "trimmed.tif";

    // Output directory naming.
    std::string baseDir = "output/";
    std::string dirBaseName = "sigma18_lambda42_";
    std::string dirName = baseDir + dirBaseName;

    // Create the base output directory if it does not exist yet.
    if (!std::filesystem::exists(baseDir)) {
        std::filesystem::create_directory(baseDir);
    }

    // If the run directory already exists, append an increasing counter
    // until an unused name is found.
    int dirCount = 2;
    while (std::filesystem::exists(dirName)) {
        dirName = baseDir + dirBaseName + std::to_string(dirCount);
        ++dirCount;
    }

    std::filesystem::create_directory(dirName);

    cv::Mat image = cv::imread(inputFileName, cv::IMREAD_ANYDEPTH);
    // Fail fast if the input is missing or unreadable; the original carried
    // on and crashed inside the first cv:: call on an empty Mat.
    if (image.empty()) {
        std::cerr << "Failed to read input image: " << inputFileName << '\n';
        return 1;
    }

    // Contrast windowing: map the input's value range onto 0-255 (8 bit).
    cv::Mat windowedImage;
    WindowProcessing(image, windowedImage, 255.0, dirName);

    // Noise reduction.
    cv::Mat gaussinaBlurImage;
    cv::GaussianBlur(windowedImage, gaussinaBlurImage, cv::Size(3, 3), 9, 0);

    // Histogram equalisation with CLAHE.
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(10.0, cv::Size(16, 16));
    cv::Mat claheImage;
    clahe->apply(windowedImage, claheImage);

    // Otsu binarisation of the CLAHE result (saved for inspection only).
    cv::Mat claheOtsuBinaryImage;
    cv::threshold(claheImage, claheOtsuBinaryImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    //BinaryProcessing(windowedImage, claheImage, dirName);

    cv::Mat inversedClaheImage = inverseImage(claheImage);

    // Gabor filter bank on the inverted CLAHE image.
    GaborProcessing(windowedImage, inversedClaheImage, dirName);

    cv::imwrite(dirName + "/Original.tif", image);
    cv::imwrite(dirName + "/WindowedImage.png", windowedImage);
    cv::imwrite(dirName + "/Gaussian.png", gaussinaBlurImage);
    cv::imwrite(dirName + "/CLAHE.png", claheImage);
    cv::imwrite(dirName + "/CLAHEInversion.png", inversedClaheImage);
    cv::imwrite(dirName + "/CLAHEOtsu.png", claheOtsuBinaryImage);

    return 0;
}