Question:

Deep learning image classifier architecture configuration

沙富
2023-03-14

Hi, I'm new to machine learning and only know the basics of how things are supposed to work. I followed this tutorial on deep learning with Python, TensorFlow, and Keras and ended up with this code:

import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

DATADIR = "C:/Users/Acer/imagerec/MRI"

CATEGORIES = ["yes", "no"]

for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')


        break
    break
print(img_array)
print(img_array.shape)

IMG_SIZE = 224

new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')


training_data = []

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass


create_training_data()

print(len(training_data))

import random

random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)



pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()

model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=15, epochs=20, validation_split=0.1)

from sklearn.metrics import confusion_matrix
pred = model.predict(X)
pred = np.round(pred)

conf = confusion_matrix(y, pred)

import seaborn as sns
sns.heatmap(conf, annot=True)

plt.show()

Running this code gave me good results, with a validation accuracy of 76.9%. What I need to do is change the model in this code to VGG16, VGG19, and MobileNet, but I didn't know how to import the pretrained models, so I decided to build and train my own model. I studied the VGG16 and VGG19 architectures, looked at how many conv and max-pooling layers they use, and came up with this code:

import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

DATADIR = "C:/Users/Acer/imagerec/EDA"

CATEGORIES = ["yes", "no"]

for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')
        plt.show()

        break
    break
print(img_array)
print(img_array.shape)

IMG_SIZE = 224

new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')
plt.show()

training_data = []

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass


create_training_data()

print(len(training_data))

import random

random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)



pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(128, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(128, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Flatten())

model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=15, epochs=1, validation_split=0.1)

from sklearn.metrics import confusion_matrix
pred = model.predict(X)
pred = np.round(pred)

conf = confusion_matrix(y, pred)

import seaborn as sns
sns.heatmap(conf, annot=True)

plt.show()

But running this gives me a validation accuracy of 57.69% at every epoch. Am I doing something wrong, or did I get everything wrong?

EDIT: I'm now using a pretrained model:

import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

DATADIR = "C:/Users/Acer/imagerec/MRI"

CATEGORIES = ["yes", "no"]

for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')


        break
    break
print(img_array)
print(img_array.shape)

IMG_SIZE = 224

new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')


training_data = []

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass


create_training_data()

print(len(training_data))

import random

random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)



pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0


def input_shape(args):
    pass


from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense

vgg = VGG16(include_top=False, weights='imagenet', input_shape=(), pooling='avg')
x = vgg.output
x = Dense(1, activation='sigmoid')(x)
model = Model(vgg.input, x)
model.summary()

from sklearn.metrics import confusion_matrix
pred = model.predict(X)
pred = np.round(pred)

conf = confusion_matrix(y, pred)

import seaborn as sns
sns.heatmap(conf, annot=True)

plt.show()

model.save('64x2-CNN.model')

and got this error:

Model: "model_1"

Layer (type)                      Output Shape              Param #
input_1 (InputLayer)              (None, None, None, 3)     0
block1_conv1 (Conv2D)             (None, None, None, 64)    1792
block1_conv2 (Conv2D)             (None, None, None, 64)    36928
block1_pool (MaxPooling2D)        (None, None, None, 64)    0
block2_conv1 (Conv2D)             (None, None, None, 128)   73856
block2_conv2 (Conv2D)             (None, None, None, 128)   147584
block2_pool (MaxPooling2D)        (None, None, None, 128)   0
block3_conv1 (Conv2D)             (None, None, None, 256)   295168
block3_conv2 (Conv2D)             (None, None, None, 256)   590080
block3_conv3 (Conv2D)             (None, None, None, 256)   590080
block3_pool (MaxPooling2D)        (None, None, None, 256)   0
block4_conv1 (Conv2D)             (None, None, None, 512)   1180160
block4_conv2 (Conv2D)             (None, None, None, 512)   2359808
block4_conv3 (Conv2D)             (None, None, None, 512)   2359808
block4_pool (MaxPooling2D)        (None, None, None, 512)   0
block5_conv1 (Conv2D)             (None, None, None, 512)   2359808
block5_conv2 (Conv2D)             (None, None, None, 512)   2359808
block5_conv3 (Conv2D)             (None, None, None, 512)   2359808
block5_pool (MaxPooling2D)        (None, None, None, 512)   0
global_average_pooling2d_1 (GlobalAveragePooling2D)  (None, 512)  0

Total params: 14,715,201
Trainable params: 14,715,201
Non-trainable params: 0

Traceback (most recent call last):
  File "C:/Users/Acer/PycharmProjects/condas/UwU.py", line 95, in <module>
    pred = model.predict(X)
  File "C:\Users\Acer\Anaconda3\envs\condas\lib\site-packages\keras\engine\training.py", line 1441, in predict
    x, _, _ = self._standardize_user_data(x)
  File "C:\Users\Acer\Anaconda3\envs\condas\lib\site-packages\keras\engine\training.py", line 579, in _standardize_user_data
    exception_prefix='input')
  File "C:\Users\Acer\Anaconda3\envs\condas\lib\site-packages\keras\engine\training_utils.py", line 145, in standardize_input_data
    str(data_shape))
ValueError: Error when checking input: expected input_1 to have shape (None, None, 3) but got array with shape (50, 50, 1)

Process finished with exit code 1

1 Answer

墨翔宇
2023-03-14

In a Keras Sequential model, only the first layer needs to be given the input_shape it should expect; in your case that is the first Conv2D layer. Also, adding several Dense layers each followed by a sigmoid activation makes no sense: a single sigmoid output unit is all you need for binary classification. Note as well that writing padding='same' on a line of its own only assigns a local variable and has no effect on the model; it has to be passed as an argument to Conv2D. Likewise, MaxPooling2D with pool_size=(1, 1) does not downsample the feature maps at all.

Refer to this:

# VGG16-style architecture: only the first layer specifies input_shape,
# and the head ends in a single sigmoid unit for binary classification.
model = Sequential([
    Conv2D(64, (3, 3), input_shape=X.shape[1:], padding='same', activation='relu'),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Flatten(),
    Dense(4096, activation='relu'),
    Dense(4096, activation='relu'),
    Dense(1, activation='sigmoid')
])
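
For completeness, a minimal sketch of how this model could then be compiled and trained, assuming X and y are the arrays built by your preprocessing code above (the loss, optimizer, batch size, and epoch count simply mirror the values from your own script):

import numpy as np

# y is built as a plain Python list in the question; Keras expects an array.
y = np.array(y)

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Hold out 10% of the data for validation, as in the original script.
model.fit(X, y, batch_size=15, epochs=20, validation_split=0.1)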

Alternatively, you can use a pretrained VGG model from keras.applications.

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense

# The ImageNet weights expect 3-channel (RGB) input; 224x224 matches IMG_SIZE above.
vgg = VGG16(include_top=False, weights='imagenet', input_shape=(224, 224, 3), pooling='avg')
x = vgg.output                         # shape (None, 512) because pooling='avg'
x = Dense(1, activation='sigmoid')(x)  # single sigmoid unit for binary classification
model = Model(vgg.input, x)
model.summary()
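
One caveat if you go this route: your arrays are single-channel grayscale, while the ImageNet-pretrained VGG16 expects three channels, which is what the shape error in your edit is complaining about. A rough sketch of one way to handle this, assuming X has shape (N, 224, 224, 1) and y is the binary label list from your preprocessing code (the epoch count and batch size are only placeholders):

import numpy as np

# Repeat the single grayscale channel three times so the input matches
# VGG16's expected (height, width, 3) shape.
X_rgb = np.repeat(X, 3, axis=-1)
y = np.array(y)

# Freeze the pretrained convolutional base so only the new Dense head is trained.
for layer in vgg.layers:
    layer.trainable = False

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_rgb, y, batch_size=15, epochs=5, validation_split=0.1)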