本文提出了一种新的水下视频图像增强策略。基于融合原理,我们的策略仅从图像的退化版本中获取输入和权重度量。为了克服水下介质的局限性,我们定义了两种输入,分别代表原始水下图像/帧的颜色校正和对比度增强版本,以及四种权重图,目的是增加由于介质散射和吸收而退化的远处目标的可见性。我们的策略是一个单一的图像方法,不需要专门的硬件或水下条件或场景结构的知识。我们的融合框架还通过执行有效的边缘保持降噪策略来支持相邻帧之间的时间相干性。增强的图像和视频的特点是降低噪音水平,更好的暗区曝光,改善了全局对比度,而最细微的细节和边缘显著增强。此外,我们的增强技术在几个具有挑战性的应用中被证明是实用的。
提示:以下是本篇文章正文内容,下面案例可供参考
1.首先对单幅图像进行自适应白平衡算法处理得到输入一,其实是灰度世界与直方图均衡化的结合,目的是在增加对比度的同时,避免图像中出现过亮或过暗的像素点;
2.对输入一进行Lab变换,对L通道进行CLAHE(对比度受限自适应直方图均衡化)算法处理,这个算法是自适应的,这样不在同一区域的亮度不会相互影响;
拉普拉斯滤波器是依据图像像素点的变化程度来调整图像的细节和边缘,进而增强图像的对比度。
def LaplacianContrast(img):
    """Laplacian contrast weight map.

    Applies a Laplacian filter (output depth CV_32F) and takes the absolute
    value scaled back to 8-bit, emphasizing edges and fine detail.
    """
    edges = cv2.Laplacian(img, cv2.CV_32F)  # ddepth 5 == cv2.CV_32F
    return cv2.convertScaleAbs(edges)
可以改善图像亮处区域和暗处区域之间的过渡部分。
def LocalContrast(img):
    """Local contrast weight map.

    Low-pass filters the image with a 5x5 separable binomial kernel, clamps
    the blurred response at pi/2.75, subtracts it from the original, and
    squares the difference to emphasize local luminance transitions.

    Fixes over the original:
    - the per-pixel Python clamp loop is replaced by the equivalent (and far
      faster) vectorized np.minimum;
    - kernel dtype taken from img.dtype instead of the fragile img[0][0].dtype
      element access;
    - kernel built with np.outer instead of a manual double loop.
    """
    h = [1.0 / 16.0, 4.0 / 16.0, 6.0 / 16.0, 4.0 / 16.0, 1.0 / 16.0]
    mask = np.outer(h, h).astype(img.dtype)  # separable binomial kernel
    # ddepth 5 == cv2.CV_32F, matching the original call.
    blurred = cv2.filter2D(img, 5, kernel=mask)
    # Clamp the low-pass response; pi/2.75 is the saturation level used by
    # the fusion paper for the local-contrast weight.
    blurred = np.minimum(blurred, math.pi / 2.75)
    localContrast = cv2.subtract(img, blurred)
    return cv2.multiply(localContrast, localContrast)
采用显著性权重图,并结合曝光度图来保护中间色调。
def Saliency(img):
    """Saliency weight map (Achanta-style).

    Blurs the BGR image, converts it to Lab, and for each channel accumulates
    the squared deviation from that channel's mean. The result highlights
    regions that stand out from the global average color.
    """
    smoothed = cv2.GaussianBlur(img, (3, 3), 3)
    lab_image = cv2.cvtColor(smoothed, cv2.COLOR_BGR2Lab)
    # Promote each Lab channel to float32 before the arithmetic below.
    channels = [np.float32(c) for c in cv2.split(lab_image)]
    saliency = np.zeros(channels[0].shape, channels[0][0][1].dtype)
    for channel in channels:
        # Deviation of the channel from its own mean, squared and accumulated.
        centered = cv2.subtract(channel, cv2.mean(channel)[0])
        saliency = cv2.add(saliency, cv2.multiply(centered, centered))
    return saliency
水下图像常常呈现出亮度不够的特点,因此把曝光度指标加入到权重计算中,来调节图像曝光度,进而改善亮度信息。曝光度权重是为亮度适中的像素点分配较大的权重,对过亮或过暗的像素点分配较小的权重。算法通过在图像Lab空间的亮度通道L上获得融合图像的曝光度权重。
def Exposedness(img):
    """Exposedness weight map.

    Assigns each pixel a Gaussian weight exp(-(v - 0.5)^2 / (2 * sigma^2))
    with sigma = 0.25, peaking at mid-tone 0.5 so well-exposed pixels get
    large weights and over/under-exposed pixels get small ones. Expects a
    single-channel image already normalized to [0, 1] (see calWeight).

    Fix over the original: the per-pixel Python loop with math.exp is
    replaced by one vectorized numpy expression computing identical values
    (math in float64, then cast back to the input dtype).
    """
    sigma = 0.25
    average = 0.5
    deviation = img.astype(np.float64) - average
    weights = np.exp(-np.square(deviation) / (2.0 * sigma ** 2))
    # Preserve the original behavior of storing into an array of img's dtype.
    return weights.astype(img.dtype)
def calWeight(img, L):
    """Combine the four fusion weight maps for one input image.

    Args:
        img: BGR image used for the saliency map.
        L: its luminance channel (0-255), normalized here to [0, 1].

    Returns:
        The element-wise sum of the Laplacian-contrast, local-contrast,
        saliency, and exposedness weight maps, as float32.
    """
    L = np.float32(np.divide(L, (255.0)))
    maps = (
        np.float32(LaplacianContrast(L)),  # Laplacian (global) contrast
        np.float32(LocalContrast(L)),      # local contrast
        np.float32(Saliency(img)),         # saliency
        np.float32(Exposedness(L)),        # exposedness
    )
    total = maps[0].copy()
    for weight_map in maps[1:]:
        total = np.add(total, weight_map)
    return total
def enhance(image, level):
    """Fusion-based underwater image enhancement entry point.

    Builds two inputs — a white-balanced version (simplest color balance)
    and its CLAHE-equalized counterpart — computes normalized per-pixel
    fusion weights for each, and blends them with multi-scale pyramid fusion.

    Args:
        image: BGR source image.
        level: number of pyramid levels used by the fusion.
    """
    corrected = np.uint8(simplest_cb(image, 5))
    lab1 = cv2.cvtColor(corrected, cv2.COLOR_BGR2Lab)
    lum1 = cv2.extractChannel(lab1, 0)
    # Second input: CLAHE applied on the luminance channel.
    equalized, lum2 = applyCLAHE(lab1, lum1)
    weight1 = calWeight(corrected, lum1)
    weight2 = calWeight(equalized, lum2)
    # Normalize so the two weight maps sum to 1 at every pixel.
    total = cv2.add(weight1, weight2)
    weight1 = cv2.divide(weight1, total)
    weight2 = cv2.divide(weight2, total)
    return fuseTwoImages(weight1, corrected, weight2, equalized, level)
权重基于高斯金字塔分解,图像基于拉普拉斯金字塔分解,然后逐层加权融合;
def filterMask(img):
    """Return the 5x5 separable binomial smoothing kernel (sums to 1).

    The kernel is the outer product of [1, 4, 6, 4, 1] / 16 with itself,
    in the same dtype as `img`.

    Fixes over the original: dtype taken from img.dtype instead of the
    fragile element access img[0][1].dtype (which fails on 1-column
    images), and the manual double loop replaced by np.outer.
    """
    h = [1.0 / 16.0, 4.0 / 16.0, 6.0 / 16.0, 4.0 / 16.0, 1.0 / 16.0]
    return np.outer(h, h).astype(img.dtype)
def buildGaussianPyramid(img, level):
    """Build a Gaussian pyramid of `level` smoothed, downsampled images.

    Level 0 is the smoothed full-resolution image; each subsequent level is
    downsampled by 2 and smoothed with the 5x5 binomial kernel.

    BUG FIX: the original called cv2.resize(tmpImg, (0, 0), tmpImg, ...) and
    discarded the return value — in Python cv2.resize does not rebind (or
    reliably fill) the passed array, so tmpImg was never downsampled and
    every level kept the base resolution. The return value must be rebound.
    """
    mask = filterMask(img)
    gaussPyr = [cv2.filter2D(img, -1, mask).copy()]
    tmpImg = img.copy()
    for i in range(1, level):
        # Halve the resolution, then smooth, at each level.
        tmpImg = cv2.resize(tmpImg, (0, 0), fx=0.5, fy=0.5,
                            interpolation=cv2.INTER_LINEAR)
        gaussPyr.append(cv2.filter2D(tmpImg, -1, mask).copy())
    return gaussPyr
def buildLaplacianPyramid(img, level):
    """Build a Laplacian pyramid of `level` band-pass images.

    First builds a plain downsampling pyramid, then replaces each level
    (except the coarsest) with the difference between it and the upsampled
    next-coarser level.

    BUG FIX: the original called cv2.resize(..., dst, ...) and discarded the
    return value, so the image was never actually downsampled (first loop)
    and the upsampled level was never produced (second loop). Also, each
    upsample must target the size of level i — taken from its own shape —
    rather than writing into a full-resolution scratch buffer.
    """
    lapPyr = [img.copy()]
    tmpImg = img.copy()
    for i in range(1, level):
        tmpImg = cv2.resize(tmpImg, (0, 0), fx=0.5, fy=0.5,
                            interpolation=cv2.INTER_LINEAR)
        lapPyr.append(tmpImg.copy())
    for i in range(level - 1):
        rows, cols = lapPyr[i].shape[:2]
        # Upsample the next-coarser level to this level's size.
        upsampled = cv2.resize(lapPyr[i + 1], (cols, rows),
                               interpolation=cv2.INTER_LINEAR)
        lapPyr[i] = cv2.subtract(lapPyr[i], upsampled)
    return lapPyr
def reconstructLaplacianPyramid(pyramid):
    """Collapse a Laplacian pyramid back into a single image.

    Walks from the coarsest level down, upsampling each level to the size of
    the next-finer one and adding it in; returns the reconstructed base level.

    BUG FIX: the original loop was `range(level - 1, 0)`, which is empty
    (missing step -1), so no reconstruction ever happened. Also each upsample
    must target the size of pyramid[i - 1] (the level it is added to), not
    the base level pyramid[0].
    """
    level = len(pyramid)
    for i in range(level - 1, 0, -1):
        rows, cols = pyramid[i - 1].shape[:2]
        upsampled = cv2.resize(pyramid[i], (cols, rows),
                               interpolation=cv2.INTER_LINEAR)
        pyramid[i - 1] = cv2.add(pyramid[i - 1], upsampled)
    return pyramid[0]
def fuseTwoImages(w1, img1, w2, img2, level):
    """Multi-scale fusion of two BGR images with per-pixel weight maps.

    The weight maps are decomposed into Gaussian pyramids and each image
    channel into a Laplacian pyramid; corresponding levels are blended as
    w1*img1 + w2*img2 and the result is collapsed back into one image.

    Args:
        w1, w2: normalized weight maps for img1 and img2.
        img1, img2: the two BGR inputs to fuse.
        level: number of pyramid levels.

    Returns:
        The fused BGR image (float32).
    """
    weightPyr1 = buildGaussianPyramid(w1, level)
    weightPyr2 = buildGaussianPyramid(w2, level)
    channels1 = cv2.split(numpy.float32(img1))
    channels2 = cv2.split(numpy.float32(img2))
    fusedChannels = []
    # Fuse each of the B, G, R channels independently.
    for cnl1, cnl2 in zip(channels1, channels2):
        pyr1 = buildLaplacianPyramid(cnl1, level)
        pyr2 = buildLaplacianPyramid(cnl2, level)
        blended = [cv2.add(cv2.multiply(pyr1[i], weightPyr1[i]),
                           cv2.multiply(pyr2[i], weightPyr2[i]))
                   for i in range(level)]
        fusedChannels.append(reconstructLaplacianPyramid(blended))
    return cv2.merge(tuple(fusedChannels))