diff --git a/ColorConversion.py b/ColorConversion.py
new file mode 100644
index 0000000..f3f464f
--- /dev/null
+++ b/ColorConversion.py
@@ -0,0 +1,285 @@
+import matplotlib.pyplot as plt
+import openpyxl
+import numpy as np
+from scipy import interpolate
+from sklearn.linear_model import LinearRegression
+
+# Reflectance values in the spectrum workbooks are stored scaled by 10000.
+REFLECTANCE_SCALE = 10000.0
+
+
+# Read a rectangular cell range from an Excel sheet into a flat 1-D array.
+def read_cell_range(sheet, start_cell, end_cell):
+    cell_range = sheet[start_cell:end_cell]
+    cell_values = []
+
+    for row in cell_range:
+        for cell in row:
+            cell_values.append(cell.value)
+
+    return np.array(cell_values)
+
+
+# Cubic interpolation of (x_org, y_org), evaluated at x_target.
+def interpolate_cubic(x_org, y_org, x_target):
+    intp = interpolate.interp1d(x_org, y_org, kind="cubic")
+    return intp(x_target)
+
+
+# Tristimulus XYZ values of a spectral reflectance curve, using the
+# module-level illuminant (light), color matching functions (x_bar,
+# y_bar, z_bar) and normalization constant K.
+def calc_XYZ(reflectance):
+    X = np.sum(reflectance * light * x_bar) / K
+    Y = np.sum(reflectance * light * y_bar) / K
+    Z = np.sum(reflectance * light * z_bar) / K
+    return [X, Y, Z]
+
+
+# Camera RGB values of a reflectance curve for the given per-channel
+# gains, using the module-level illuminant and camera sensitivities.
+def calc_rgb(reflectance, gain_r, gain_g, gain_b):
+    r = np.sum(reflectance * light * cam_r) * gain_r
+    g = np.sum(reflectance * light * cam_g) * gain_g
+    b = np.sum(reflectance * light * cam_b) * gain_b
+    return [r, g, b]
+
+
+# RGB values with the gains calibrated on the Macbeth Color Checker.
+def calc_mcc_rgb(reflectance):
+    return calc_rgb(reflectance, gain_mcc_r, gain_mcc_g, gain_mcc_b)
+
+
+# RGB values with the gains calibrated on the Tongue Color Checker.
+def calc_tcc_rgb(reflectance):
+    return calc_rgb(reflectance, gain_tcc_r, gain_tcc_g, gain_tcc_b)
+
+
+# Nonlinear transfer function of the XYZ -> L*a*b* conversion.  The
+# linear branch is the standard t / (3 * delta**2) + 4 / 29 rewritten
+# as ((29/3)**3 * t + 16) / 116, with delta = 6/29.
+def f(t):
+    if t > (6.0 / 29.0) ** 3:
+        return t ** (1.0 / 3.0)
+    else:
+        return ((29.0 / 3.0) ** 3 * t + 16.0) / 116.0
+
+
+# XYZ -> L*a*b* conversion against the reference white X_ref/Y_ref/Z_ref.
+def xyz_to_lab(xyz):
+    L = 116.0 * f(xyz[1] / Y_ref) - 16.0
+    a = 500.0 * (f(xyz[0] / X_ref) - f(xyz[1] / Y_ref))
+    b = 200.0 * (f(xyz[1] / Y_ref) - f(xyz[2] / Z_ref))
+    return [L, a, b]
+
+
+# Color matching functions from the CIE spectrum workbook.
+wbCIE = openpyxl.load_workbook("SpectrumDataCIE.xlsx")
+wsCIE = wbCIE["Sheet1"]
+wavelen_org = read_cell_range(wsCIE, "A5", "A99")
+x_bar_org = read_cell_range(wsCIE, "B5", "B99")
+y_bar_org = read_cell_range(wsCIE, "C5", "C99")
+z_bar_org = read_cell_range(wsCIE, "D5", "D99")
+# Resample to 400-700 nm in 10 nm steps.
+wavelen = np.linspace(400, 700, 31)
+x_bar = interpolate_cubic(wavelen_org, x_bar_org, wavelen)
+y_bar = interpolate_cubic(wavelen_org, y_bar_org, wavelen)
+z_bar = interpolate_cubic(wavelen_org, z_bar_org, wavelen)
+# Plot the color matching functions.
+fig = plt.figure(figsize=(12, 8))
+ax1 = fig.add_subplot(2, 3, 1)
+ax1.plot(wavelen, x_bar, linestyle="solid", color="red")
+ax1.plot(wavelen, y_bar, linestyle="solid", color="green")
+ax1.plot(wavelen, z_bar, linestyle="solid", color="blue")
+ax1.set_title("color matching function")
+
+# Camera spectral sensitivity curves.
+wbSzk = openpyxl.load_workbook("SpectrumDataSuzuki.xlsx")
+wsSzk = wbSzk["Sheet1"]
+cam_r = read_cell_range(wsSzk, "B4", "B34")
+cam_g = read_cell_range(wsSzk, "C4", "C34")
+cam_b = read_cell_range(wsSzk, "D4", "D34")
+ax2 = fig.add_subplot(2, 3, 2)
+ax2.plot(wavelen, cam_r, linestyle="solid", color="red")
+ax2.plot(wavelen, cam_g, linestyle="solid", color="green")
+ax2.plot(wavelen, cam_b, linestyle="solid", color="blue")
+ax2.set_title("Lw115C camera spectral sensitivity")
+
+# D65 and TIAS illuminant spectra, each normalized to a peak of 1.
+wavelen2_org = np.linspace(300, 830, 107)
+d65_org = read_cell_range(wsCIE, "X6", "X112")
+d65 = interpolate_cubic(wavelen2_org, d65_org, wavelen)
+d65 = d65 / d65.max()
+tias = read_cell_range(wsSzk, "F4", "F34")
+tias = tias / tias.max()
+ax3 = fig.add_subplot(2, 3, 3)
+ax3.plot(wavelen, tias, linestyle="solid", color="red")
+ax3.plot(wavelen, d65, linestyle="solid", color="orange")
+ax3.set_title("Illuminations spectrum")
+print("Illumination selection = TIAS")
+light = tias
+
+# Sample (skin patch / tongue model) spectral reflectance curves.
+sample = np.zeros((5, 31))
+sample[0, :] = read_cell_range(wsSzk, "H4", "H34") / REFLECTANCE_SCALE  # skin patch 1
+sample[1, :] = read_cell_range(wsSzk, "I4", "I34") / REFLECTANCE_SCALE  # skin patch 2
+sample[2, :] = read_cell_range(wsSzk, "J4", "J34") / REFLECTANCE_SCALE  # tongue model A
+sample[3, :] = read_cell_range(wsSzk, "K4", "K34") / REFLECTANCE_SCALE  # tongue model B
+sample[4, :] = read_cell_range(wsSzk, "L4", "L34") / REFLECTANCE_SCALE  # tongue model C
+ax4 = fig.add_subplot(2, 3, 4)
+for i in range(sample.shape[0]):
+    ax4.plot(wavelen, sample[i, :], linestyle="solid")
+ax4.set_title("Samples' reflectance")
+
+# 24 Macbeth Color Checker (MCC) reflectance curves (columns B..Y).
+wsMcc = wbSzk["Sheet2"]
+mcc = np.zeros((24, wavelen.shape[0]))
+ax5 = fig.add_subplot(2, 3, 5)
+for i in range(mcc.shape[0]):
+    col = chr(ord("B") + i)
+    mcc[i, :] = read_cell_range(wsMcc, col + "3", col + "33") / REFLECTANCE_SCALE
+    ax5.plot(wavelen, mcc[i, :], linestyle="solid")
+ax5.set_title("Macbeth CC reflectance")
+
+# 24 Tongue Color Checker (TCC) reflectance curves (columns B..Y).
+wsTcc = wbSzk["Sheet3"]
+tcc = np.zeros((24, wavelen.shape[0]))
+ax6 = fig.add_subplot(2, 3, 6)
+for i in range(tcc.shape[0]):
+    col = chr(ord("B") + i)
+    tcc[i, :] = read_cell_range(wsTcc, col + "3", col + "33") / REFLECTANCE_SCALE
+    ax6.plot(wavelen, tcc[i, :], linestyle="solid")
+ax6.set_title("Tongue CC reflectance")
+
+# Reference white XYZ values and the normalization constant K.
+K = np.sum(light * y_bar)
+X_ref = np.sum(light * x_bar) / K
+Z_ref = np.sum(light * z_bar) / K
+Y_ref = 1.0
+print("Xw, Yw, Zw = {:.3}, {:.3}, {:.3}".format(X_ref, Y_ref, Z_ref))
+
+# XYZ values of the MCC patches.
+mcc_xyz = np.zeros((mcc.shape[0], 3))
+for i in range(mcc.shape[0]):
+    mcc_xyz[i, :] = calc_XYZ(mcc[i])
+print("MCC XYZ=")
+print(mcc_xyz)
+
+# Camera RGB values of the 24 MCC patches.  The gains are calibrated
+# so that MCC patch 19 (white) maps to RGB = 200, 200, 200.
+calib_target = 18
+calib_value = 200
+gain_mcc_r = calib_value / np.sum(mcc[calib_target] * light * cam_r)
+gain_mcc_g = calib_value / np.sum(mcc[calib_target] * light * cam_g)
+gain_mcc_b = calib_value / np.sum(mcc[calib_target] * light * cam_b)
+mcc_rgb = np.zeros((mcc.shape[0], 3))
+for i in range(mcc.shape[0]):
+    mcc_rgb[i, :] = calc_mcc_rgb(mcc[i])
+print("MCC RGB=")
+print(mcc_rgb)
+
+# Fit the RGB -> XYZ conversion matrix on the MCC.
+rgb2xyz_mcc = LinearRegression()
+rgb2xyz_mcc.fit(mcc_rgb, mcc_xyz)
+
+# Convert the 24 MCC colors RGB -> XYZ with the MCC matrix (validation).
+pred_mcc_xyz = rgb2xyz_mcc.predict(mcc_rgb)
+print("MCC XYZ Converted by MCC=")
+print(pred_mcc_xyz)
+
+
+# XYZ and L*a*b* values of the samples.
+sample_xyz = np.zeros((sample.shape[0], 3))
+sample_lab = np.zeros((sample.shape[0], 3))
+for i in range(sample.shape[0]):
+    sample_xyz[i, :] = calc_XYZ(sample[i])
+    sample_lab[i, :] = xyz_to_lab(sample_xyz[i, :])
+print("Sample XYZ=")
+print(sample_xyz)
+print("Sample L*a*b*=")
+print(sample_lab)
+
+# RGB values of the samples (MCC-calibrated gains).
+sample_rgb = np.zeros((sample.shape[0], 3))
+for i in range(sample.shape[0]):
+    sample_rgb[i, :] = calc_mcc_rgb(sample[i])
+print("Sample RGB=")
+print(sample_rgb)
+
+# Convert the samples RGB -> XYZ with the MCC matrix (test).
+pred_sample_xyz_by_mcc = rgb2xyz_mcc.predict(sample_rgb)
+print("Sample XYZ Converted by MCC=")
+print(pred_sample_xyz_by_mcc)
+pred_sample_lab_by_mcc = np.zeros((pred_sample_xyz_by_mcc.shape[0], 3))
+for i in range(pred_sample_xyz_by_mcc.shape[0]):
+    pred_sample_lab_by_mcc[i, :] = xyz_to_lab(pred_sample_xyz_by_mcc[i, :])
+print("Sample L*a*b* Converted by MCC=")
+print(pred_sample_lab_by_mcc)
+
+# Color differences of the sample L*a*b* values for the MCC matrix.
+sample_delta_e_by_mcc = []
+for i in range(pred_sample_lab_by_mcc.shape[0]):
+    sample_delta_e_by_mcc.append(
+        np.linalg.norm(sample_lab[i, :] - pred_sample_lab_by_mcc[i, :])
+    )
+print("MCCで変換行列を算出した場合のサンプルの色差")
+print("肌パッチ1 = ", sample_delta_e_by_mcc[0])
+print("肌パッチ2 = ", sample_delta_e_by_mcc[1])
+print("舌モデルA = ", sample_delta_e_by_mcc[2])
+print("舌モデルB = ", sample_delta_e_by_mcc[3])
+print("舌モデルC = ", sample_delta_e_by_mcc[4])
+
+
+# XYZ values of the TCC patches.
+tcc_xyz = np.zeros((tcc.shape[0], 3))
+for i in range(tcc.shape[0]):
+    tcc_xyz[i, :] = calc_XYZ(tcc[i])
+print("TCC XYZ=")
+print(tcc_xyz)
+
+# Camera RGB values of the 24 TCC patches.  The gains are calibrated
+# so that TCC patch 13 (white) maps to RGB = 200, 200, 200.
+calib_target = 12
+calib_value = 200
+gain_tcc_r = calib_value / np.sum(tcc[calib_target] * light * cam_r)
+gain_tcc_g = calib_value / np.sum(tcc[calib_target] * light * cam_g)
+gain_tcc_b = calib_value / np.sum(tcc[calib_target] * light * cam_b)
+tcc_rgb = np.zeros((tcc.shape[0], 3))
+for i in range(tcc.shape[0]):
+    tcc_rgb[i, :] = calc_tcc_rgb(tcc[i])
+print("TCC RGB=")
+print(tcc_rgb)
+
+# Fit the RGB -> XYZ conversion matrix on the TCC.
+rgb2xyz_tcc = LinearRegression()
+rgb2xyz_tcc.fit(tcc_rgb, tcc_xyz)
+
+# Convert the 24 TCC colors RGB -> XYZ with the TCC matrix (validation).
+pred_tcc_xyz = rgb2xyz_tcc.predict(tcc_rgb)
+print("TCC XYZ Converted by TCC=")
+print(pred_tcc_xyz)
+
+# Convert the samples RGB -> XYZ with the TCC matrix (test).
+pred_sample_xyz_by_tcc = rgb2xyz_tcc.predict(sample_rgb)
+print("Sample XYZ Converted by TCC=")
+print(pred_sample_xyz_by_tcc)
+pred_sample_lab_by_tcc = np.zeros((pred_sample_xyz_by_tcc.shape[0], 3))
+for i in range(pred_sample_xyz_by_tcc.shape[0]):
+    pred_sample_lab_by_tcc[i, :] = xyz_to_lab(pred_sample_xyz_by_tcc[i, :])
+print("Sample L*a*b* Converted by TCC=")
+print(pred_sample_lab_by_tcc)
+
+# Color differences of the sample L*a*b* values for the TCC matrix.
+sample_delta_e_by_tcc = []
+for i in range(pred_sample_lab_by_tcc.shape[0]):
+    sample_delta_e_by_tcc.append(
+        np.linalg.norm(sample_lab[i, :] - pred_sample_lab_by_tcc[i, :])
+    )
+print("TCCで変換行列を算出した場合のサンプルの色差")
+print("肌パッチ1 = ", sample_delta_e_by_tcc[0])
+print("肌パッチ2 = ", sample_delta_e_by_tcc[1])
+print("舌モデルA = ", sample_delta_e_by_tcc[2])
+print("舌モデルB = ", sample_delta_e_by_tcc[3])
+print("舌モデルC = ", sample_delta_e_by_tcc[4])
+
+plt.show()
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d87fdf2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,12 @@
+# Evaluating Color Checker
+
+カラーチェッカーの RGB->XYZ(Lab)変換精度評価スクリプト
+
+## 開発環境
+
+- Python
+- matplotlib
+- openpyxl
+- numpy
+- scipy
+- scikit-learn
diff --git a/SpectrumDataCIE.xlsx b/SpectrumDataCIE.xlsx
new file mode 100644
index 0000000..7f26e90
--- /dev/null
+++ b/SpectrumDataCIE.xlsx
Binary files differ
diff --git a/SpectrumDataSuzuki.xlsx b/SpectrumDataSuzuki.xlsx
new file mode 100644
index 0000000..31c6372
--- /dev/null
+++ b/SpectrumDataSuzuki.xlsx
Binary files differ