Keras provides several models pretrained on ImageNet: Xception, VGG16, VGG19, ResNet50, and InceptionV3. When using these models, the include_top argument controls whether the fully connected layers at the top of the network are included: with them, the model classifies images into the 1000 ImageNet classes; without them, the pretrained weights can be reused as a feature extractor for custom tasks.
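To make the two uses concrete, here is a minimal sketch (the 224x224 input_shape is just the usual ImageNet size, chosen for illustration):

from keras.applications.resnet50 import ResNet50

# Full network with the ImageNet classifier head (1000-way softmax)
clf_model = ResNet50(weights='imagenet', include_top=True)

# Convolutional base only, reusable as a fixed feature extractor
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))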
The automatic download of the weights at runtime can fail; in that case, download them manually and place them under "~/.keras/models/" (with WinPython, under "settings/.keras/models/").
Correction: the earlier description and use of K.learning_phase(), the flag that indicates whether the model is in training or test mode, was wrong. The function's documentation says: "The learning phase flag is a bool tensor (0 = test, 1 = train)", so 0 means test mode and 1 means training mode; for some layer types (such as Dropout and BatchNormalization) the two modes behave differently.
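A quick way to confirm what the flag means is to feed it into a K.function together with a layer that depends on it; the following small self-contained check uses a toy input and Dropout rate chosen purely for illustration:

import numpy as np
from keras.layers import Input, Dropout
from keras import backend as K

inp = Input(shape=(4,))
out = Dropout(0.5)(inp)
f = K.function([inp, K.learning_phase()], [out])

x = np.ones((1, 4))
print(f([x, 0])[0])   # learning_phase = 0 (test): Dropout is a no-op, output equals the input
print(f([x, 1])[0])   # learning_phase = 1 (train): about half the units are zeroed, the rest rescaled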
Here the pretrained ResNet50 model is used to classify images from the Caltech101 dataset. With only a CPU the process is slow, but as long as the training set stays fixed, the slow part (extracting the features) only has to run once.
The Chinese documentation for this pretrained model is at http://keras-cn.readthedocs.io/en/latest/other/application/#resnet50.
The versions I used:
1. Ubuntu 16.04.3
2. Python 2.7
3. Keras 2.0.8
4. TensorFlow 1.3.0
5. NumPy 1.13.1
6. python-opencv 2.4.9.1+dfsg-1.5ubuntu1
7. h5py 2.7.0
Extracting the image data from the dataset folders:
Functions:
def eachFile(filepath):                 # list the file names inside a directory
    pathDir = os.listdir(filepath)
    out = []
    for allDir in pathDir:
        child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
        out.append(child)
    return out

def get_data(data_name,train_left=0.0,train_right=0.7,train_all=0.7,resize=True,data_format=None,t=''):   # read the image data from the folders
    file_name = os.path.join(pic_dir_out,data_name+t+'_'+str(train_left)+'_'+str(train_right)+'_'+str(Width)+"X"+str(Height)+".h5")
    print file_name
    if os.path.exists(file_name):       # reuse the data if it was already saved to a file
        f = h5py.File(file_name,'r')
        if t=='train':
            X_train = f['X_train'][:]
            y_train = f['y_train'][:]
            f.close()
            return (X_train, y_train)
        elif t=='test':
            X_test = f['X_test'][:]
            y_test = f['y_test'][:]
            f.close()
            return (X_test, y_test)
        else:
            return
    data_format = conv_utils.normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0
    for pic_dir in pic_dir_set:
        print pic_dir_data+pic_dir
        if not os.path.isdir(os.path.join(pic_dir_data,pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data,pic_dir))
        pic_index = 0
        train_count = int(len(pic_set)*train_all)
        train_l = int(len(pic_set)*train_left)
        train_r = int(len(pic_set)*train_right)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data,pic_dir,pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data,pic_dir,pic_name))
            if img is None:
                continue
            if (resize):
                img = cv2.resize(img,(Width,Height))
                img = img.reshape(-1,Width,Height,3)
            if (pic_index < train_count):
                if t=='train':
                    if (pic_index >= train_l and pic_index < train_r):
                        X_train.append(img)
                        y_train.append(label)
            else:
                if t=='test':
                    X_test.append(img)
                    y_test.append(label)
            pic_index += 1
        if len(pic_set) != 0:
            label += 1
    f = h5py.File(file_name,'w')
    if t=='train':
        X_train = np.concatenate(X_train,axis=0)
        y_train = np.array(y_train)
        f.create_dataset('X_train', data = X_train)
        f.create_dataset('y_train', data = y_train)
        f.close()
        return (X_train, y_train)
    elif t=='test':
        X_test = np.concatenate(X_test,axis=0)
        y_test = np.array(y_test)
        f.create_dataset('X_test', data = X_test)
        f.create_dataset('y_test', data = y_test)
        f.close()
        return (X_test, y_test)
    else:
        return
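To make the split parameters concrete, here is a hypothetical class folder with 100 images and the default arguments (a worked check, not part of the original code):

pics = 100                            # hypothetical number of images in one class folder
train_all, train_left, train_right = 0.7, 0.0, 0.7
train_count = int(pics * train_all)   # 70 -> images 0..69 form the training pool, 70..99 the test pool
train_l = int(pics * train_left)      # 0
train_r = int(pics * train_right)     # 70 -> images 0..69 are actually kept when t == 'train'
print train_count, train_l, train_r   # 70 0 70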
Calling it:
global Width, Height, pic_dir_out, pic_dir_data
Width = 224
Height = 224
num_classes = 102        # 102 for Caltech101, 10 for cifar10
pic_dir_out = '/home/ccuux3/pic_cnn/pic_out/'
pic_dir_data = '/home/ccuux3/pic_cnn/pic_dataset/Caltech101/'
sub_dir = '224_resnet50/'
if not os.path.isdir(os.path.join(pic_dir_out,sub_dir)):
    os.mkdir(os.path.join(pic_dir_out,sub_dir))
pic_dir_mine = os.path.join(pic_dir_out,sub_dir)
(X_train, y_train) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='train')
y_train = np_utils.to_categorical(y_train, num_classes)
Load the pretrained ResNet50 model without the top fully connected layers, run the training images through the network, and save the resulting features to a file so they can be reused later (my machine did not have enough memory, so the images are pushed through in small batches):
input_tensor = Input(shape=(224, 224, 3))
base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights='imagenet')
#base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights=None)
get_resnet50_output = K.function([base_model.layers[0].input, K.learning_phase()],
                                 [base_model.layers[-1].output])

file_name = os.path.join(pic_dir_mine,'resnet50_train_output'+'.h5')
if os.path.exists(file_name):
    f = h5py.File(file_name,'r')
    resnet50_train_output = f['resnet50_train_output'][:]
    f.close()
else:
    resnet50_train_output = []
    delta = 10
    for i in range(0,len(X_train),delta):
        print i
        one_resnet50_train_output = get_resnet50_output([X_train[i:i+delta], 0])[0]
        resnet50_train_output.append(one_resnet50_train_output)
    resnet50_train_output = np.concatenate(resnet50_train_output,axis=0)
    f = h5py.File(file_name,'w')
    f.create_dataset('resnet50_train_output', data = resnet50_train_output)
    f.close()
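A quick sanity check (assuming that in this Keras version ResNet50 with include_top=False ends in a 7x7 average-pooling layer, which is what makes the (1, 1, 2048) shape used below line up):

print base_model.output_shape   # expected: (None, 1, 1, 2048) for 224x224 inputs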
Use the features produced by the ResNet50 network for image classification:
input_tensor = Input(shape=(1, 1, 2048))
x = Flatten()(input_tensor)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=predictions)
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
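As an optional check that this head matches the saved features, a summary should show the Flatten layer turning the (1, 1, 2048) input into a 2048-dimensional vector before the two Dense layers:

model.summary()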
Train on the image dataset:
print('\nTraining ------------')     # load weights from a file if present, and save the new weights to a new file after training
cm = 0                               # increase this counter to keep training across multiple runs
cm_str = '' if cm==0 else str(cm)
cm2_str = '' if (cm+1)==0 else str(cm+1)
if cm >= 1:
    model.load_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm_str+'.h5'))
model.fit(resnet50_train_output, y_train, epochs=10, batch_size=128,)
model.save_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm2_str+'.h5'))
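With cm = 0, no weights are loaded and the trained head is saved as cnn_model_Caltech101_resnet50_1.h5; on a later run you can set cm = 1 to load that file, train for another 10 epochs, and save cnn_model_Caltech101_resnet50_2.h5, and so on.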
Test on the image dataset:
(X_test, y_test) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='test')
y_test = np_utils.to_categorical(y_test, num_classes)

file_name = os.path.join(pic_dir_mine,'resnet50_test_output'+'.h5')
if os.path.exists(file_name):
    f = h5py.File(file_name,'r')
    resnet50_test_output = f['resnet50_test_output'][:]
    f.close()
else:
    resnet50_test_output = []
    delta = 10
    for i in range(0,len(X_test),delta):
        print i
        one_resnet50_test_output = get_resnet50_output([X_test[i:i+delta], 0])[0]
        resnet50_test_output.append(one_resnet50_test_output)
    resnet50_test_output = np.concatenate(resnet50_test_output,axis=0)
    f = h5py.File(file_name,'w')
    f.create_dataset('resnet50_test_output', data = resnet50_test_output)
    f.close()

print('\nTesting ------------')     # evaluate on the test set
class_name_list = get_name_list(pic_dir_data)     # get the per-class top-N accuracy
pred = model.predict(resnet50_test_output, batch_size=32)
Print the per-class top-5 accuracy on the test set:
N = 5
pred_list = []
for row in pred:
    pred_list.append(row.argsort()[-N:][::-1])     # indices of the N largest values
pred_array = np.array(pred_list)
test_arg = np.argmax(y_test,axis=1)
class_count = [0 for _ in xrange(num_classes)]
class_acc = [0 for _ in xrange(num_classes)]
for i in xrange(len(test_arg)):
    class_count[test_arg[i]] += 1
    if test_arg[i] in pred_array[i]:
        class_acc[test_arg[i]] += 1
print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))
for i in xrange(num_classes):
    print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))
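The argsort()[-N:][::-1] trick is easy to verify on a made-up score vector (the values here are purely illustrative):

import numpy as np
row = np.array([0.1, 0.05, 0.6, 0.2, 0.05])
print row.argsort()[-3:][::-1]    # [2 3 0]: indices of the three largest scores, best first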
Complete code:
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import h5py
import os

from keras.utils import np_utils, conv_utils
from keras.models import Model
from keras.layers import Flatten, Dense, Input
from keras.optimizers import Adam
from keras.applications.resnet50 import ResNet50
from keras import backend as K

def get_name_list(filepath):            # get the name of each class
    pathDir = os.listdir(filepath)
    out = []
    for allDir in pathDir:
        if os.path.isdir(os.path.join(filepath,allDir)):
            child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese directory names
            out.append(child)
    return out

def eachFile(filepath):                 # list the file names inside a directory
    pathDir = os.listdir(filepath)
    out = []
    for allDir in pathDir:
        child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
        out.append(child)
    return out

def get_data(data_name,train_left=0.0,train_right=0.7,train_all=0.7,resize=True,data_format=None,t=''):   # read the image data from the folders
    file_name = os.path.join(pic_dir_out,data_name+t+'_'+str(train_left)+'_'+str(train_right)+'_'+str(Width)+"X"+str(Height)+".h5")
    print file_name
    if os.path.exists(file_name):       # reuse the data if it was already saved to a file
        f = h5py.File(file_name,'r')
        if t=='train':
            X_train = f['X_train'][:]
            y_train = f['y_train'][:]
            f.close()
            return (X_train, y_train)
        elif t=='test':
            X_test = f['X_test'][:]
            y_test = f['y_test'][:]
            f.close()
            return (X_test, y_test)
        else:
            return
    data_format = conv_utils.normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0
    for pic_dir in pic_dir_set:
        print pic_dir_data+pic_dir
        if not os.path.isdir(os.path.join(pic_dir_data,pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data,pic_dir))
        pic_index = 0
        train_count = int(len(pic_set)*train_all)
        train_l = int(len(pic_set)*train_left)
        train_r = int(len(pic_set)*train_right)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data,pic_dir,pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data,pic_dir,pic_name))
            if img is None:
                continue
            if (resize):
                img = cv2.resize(img,(Width,Height))
                img = img.reshape(-1,Width,Height,3)
            if (pic_index < train_count):
                if t=='train':
                    if (pic_index >= train_l and pic_index < train_r):
                        X_train.append(img)
                        y_train.append(label)
            else:
                if t=='test':
                    X_test.append(img)
                    y_test.append(label)
            pic_index += 1
        if len(pic_set) != 0:
            label += 1
    f = h5py.File(file_name,'w')
    if t=='train':
        X_train = np.concatenate(X_train,axis=0)
        y_train = np.array(y_train)
        f.create_dataset('X_train', data = X_train)
        f.create_dataset('y_train', data = y_train)
        f.close()
        return (X_train, y_train)
    elif t=='test':
        X_test = np.concatenate(X_test,axis=0)
        y_test = np.array(y_test)
        f.create_dataset('X_test', data = X_test)
        f.create_dataset('y_test', data = y_test)
        f.close()
        return (X_test, y_test)
    else:
        return

def main():
    global Width, Height, pic_dir_out, pic_dir_data
    Width = 224
    Height = 224
    num_classes = 102        # 102 for Caltech101, 10 for cifar10
    pic_dir_out = '/home/ccuux3/pic_cnn/pic_out/'
    pic_dir_data = '/home/ccuux3/pic_cnn/pic_dataset/Caltech101/'
    sub_dir = '224_resnet50/'
    if not os.path.isdir(os.path.join(pic_dir_out,sub_dir)):
        os.mkdir(os.path.join(pic_dir_out,sub_dir))
    pic_dir_mine = os.path.join(pic_dir_out,sub_dir)
    (X_train, y_train) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='train')
    y_train = np_utils.to_categorical(y_train, num_classes)

    input_tensor = Input(shape=(224, 224, 3))
    base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights='imagenet')
    #base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights=None)
    get_resnet50_output = K.function([base_model.layers[0].input, K.learning_phase()],
                                     [base_model.layers[-1].output])

    file_name = os.path.join(pic_dir_mine,'resnet50_train_output'+'.h5')
    if os.path.exists(file_name):
        f = h5py.File(file_name,'r')
        resnet50_train_output = f['resnet50_train_output'][:]
        f.close()
    else:
        resnet50_train_output = []
        delta = 10
        for i in range(0,len(X_train),delta):
            print i
            one_resnet50_train_output = get_resnet50_output([X_train[i:i+delta], 0])[0]
            resnet50_train_output.append(one_resnet50_train_output)
        resnet50_train_output = np.concatenate(resnet50_train_output,axis=0)
        f = h5py.File(file_name,'w')
        f.create_dataset('resnet50_train_output', data = resnet50_train_output)
        f.close()

    input_tensor = Input(shape=(1, 1, 2048))
    x = Flatten()(input_tensor)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=predictions)
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',metrics=['accuracy'])

    print('\nTraining ------------')     # load weights from a file if present, and save the new weights to a new file after training
    cm = 0                               # increase this counter to keep training across multiple runs
    cm_str = '' if cm==0 else str(cm)
    cm2_str = '' if (cm+1)==0 else str(cm+1)
    if cm >= 1:
        model.load_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm_str+'.h5'))
    model.fit(resnet50_train_output, y_train, epochs=10, batch_size=128,)
    model.save_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm2_str+'.h5'))

    (X_test, y_test) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='test')
    y_test = np_utils.to_categorical(y_test, num_classes)

    file_name = os.path.join(pic_dir_mine,'resnet50_test_output'+'.h5')
    if os.path.exists(file_name):
        f = h5py.File(file_name,'r')
        resnet50_test_output = f['resnet50_test_output'][:]
        f.close()
    else:
        resnet50_test_output = []
        delta = 10
        for i in range(0,len(X_test),delta):
            print i
            one_resnet50_test_output = get_resnet50_output([X_test[i:i+delta], 0])[0]
            resnet50_test_output.append(one_resnet50_test_output)
        resnet50_test_output = np.concatenate(resnet50_test_output,axis=0)
        f = h5py.File(file_name,'w')
        f.create_dataset('resnet50_test_output', data = resnet50_test_output)
        f.close()

    print('\nTesting ------------')     # evaluate on the test set
    class_name_list = get_name_list(pic_dir_data)     # get the per-class top-N accuracy
    pred = model.predict(resnet50_test_output, batch_size=32)
    f = h5py.File(os.path.join(pic_dir_mine,'pred_'+cm2_str+'.h5'),'w')
    f.create_dataset('pred', data = pred)
    f.close()

    N = 1
    pred_list = []
    for row in pred:
        pred_list.append(row.argsort()[-N:][::-1])     # indices of the N largest values
    pred_array = np.array(pred_list)
    test_arg = np.argmax(y_test,axis=1)
    class_count = [0 for _ in xrange(num_classes)]
    class_acc = [0 for _ in xrange(num_classes)]
    for i in xrange(len(test_arg)):
        class_count[test_arg[i]] += 1
        if test_arg[i] in pred_array[i]:
            class_acc[test_arg[i]] += 1
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))
    for i in xrange(num_classes):
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))
    print('----------------------------------------------------')

    N = 5
    pred_list = []
    for row in pred:
        pred_list.append(row.argsort()[-N:][::-1])     # indices of the N largest values
    pred_array = np.array(pred_list)
    test_arg = np.argmax(y_test,axis=1)
    class_count = [0 for _ in xrange(num_classes)]
    class_acc = [0 for _ in xrange(num_classes)]
    for i in xrange(len(test_arg)):
        class_count[test_arg[i]] += 1
        if test_arg[i] in pred_array[i]:
            class_acc[test_arg[i]] += 1
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))
    for i in xrange(num_classes):
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))

if __name__ == '__main__':
    main()
Output of a run:
Using TensorFlow backend.
/home/ccuux3/pic_cnn/pic_out/Caltech101_color_data_train_0.0_0.7_224X224.h5

Training ------------
Epoch 1/10
6353/6353 [==============================] - 5s - loss: 1.1269 - acc: 0.7494
Epoch 2/10
6353/6353 [==============================] - 4s - loss: 0.1603 - acc: 0.9536
Epoch 3/10
6353/6353 [==============================] - 4s - loss: 0.0580 - acc: 0.9855
Epoch 4/10
6353/6353 [==============================] - 4s - loss: 0.0312 - acc: 0.9931
Epoch 5/10
6353/6353 [==============================] - 4s - loss: 0.0182 - acc: 0.9956
Epoch 6/10
6353/6353 [==============================] - 4s - loss: 0.0111 - acc: 0.9976
Epoch 7/10
6353/6353 [==============================] - 4s - loss: 0.0090 - acc: 0.9981
Epoch 8/10
6353/6353 [==============================] - 4s - loss: 0.0082 - acc: 0.9987
Epoch 9/10
6353/6353 [==============================] - 4s - loss: 0.0069 - acc: 0.9994
Epoch 10/10
6353/6353 [==============================] - 4s - loss: 0.0087 - acc: 0.9987
/home/ccuux3/pic_cnn/pic_out/Caltech101_color_data_test_0.0_0.7_224X224.h5

Testing ------------
('top-1 all acc:', '2597/2792', 0.9301575931232091)
(0, u'62.mayfly', 'acc: 10/12')
(1, u'66.Motorbikes', 'acc: 240/240')
(2, u'68.octopus', 'acc: 7/11')
(3, u'94.umbrella', 'acc: 21/23')
(4, u'90.strawberry', 'acc: 10/11')
(5, u'86.stapler', 'acc: 13/14')
(6, u'83.sea_horse', 'acc: 15/18')
(7, u'72.pigeon', 'acc: 13/14')
(8, u'89.stop_sign', 'acc: 19/20')
(9, u'4.BACKGROUND_Google', 'acc: 125/141')
(10, u'22.cougar_face', 'acc: 18/21')
(11, u'81.scissors', 'acc: 9/12')
(12, u'100.wrench', 'acc: 8/12')
(13, u'57.Leopards', 'acc: 60/60')
(14, u'46.hawksbill', 'acc: 29/30')
(15, u'30.dolphin', 'acc: 19/20')
(16, u'9.bonsai', 'acc: 39/39')
(17, u'35.euphonium', 'acc: 18/20')
(18, u'44.gramophone', 'acc: 16/16')
(19, u'74.platypus', 'acc: 7/11')
(20, u'14.camera', 'acc: 15/15')
(21, u'55.lamp', 'acc: 15/19')
(22, u'38.Faces_easy', 'acc: 129/131')
(23, u'54.ketch', 'acc: 28/35')
(24, u'33.elephant', 'acc: 18/20')
(25, u'3.ant', 'acc: 8/13')
(26, u'49.helicopter', 'acc: 26/27')
(27, u'36.ewer', 'acc: 26/26')
(28, u'78.rooster', 'acc: 14/15')
(29, u'70.pagoda', 'acc: 15/15')
(30, u'58.llama', 'acc: 20/24')
(31, u'5.barrel', 'acc: 15/15')
(32, u'101.yin_yang', 'acc: 18/18')
(33, u'18.cellphone', 'acc: 18/18')
(34, u'59.lobster', 'acc: 7/13')
(35, u'17.ceiling_fan', 'acc: 14/15')
(36, u'16.car_side', 'acc: 37/37')
(37, u'50.ibis', 'acc: 24/24')
(38, u'76.revolver', 'acc: 23/25')
(39, u'84.snoopy', 'acc: 7/11')
(40, u'87.starfish', 'acc: 26/26')
(41, u'12.buddha', 'acc: 24/26')
(42, u'52.joshua_tree', 'acc: 20/20')
(43, u'43.gerenuk', 'acc: 10/11')
(44, u'65.minaret', 'acc: 23/23')
(45, u'91.sunflower', 'acc: 26/26')
(46, u'56.laptop', 'acc: 24/25')
(47, u'77.rhino', 'acc: 17/18')
(48, u'1.airplanes', 'acc: 239/240')
(49, u'88.stegosaurus', 'acc: 16/18')
(50, u'23.crab', 'acc: 17/22')
(51, u'8.binocular', 'acc: 8/10')
(52, u'31.dragonfly', 'acc: 18/21')
(53, u'6.bass', 'acc: 15/17')
(54, u'95.watch', 'acc: 72/72')
(55, u'0.accordion', 'acc: 17/17')
(56, u'98.wild_cat', 'acc: 9/11')
(57, u'67.nautilus', 'acc: 16/17')
(58, u'40.flamingo', 'acc: 20/21')
(59, u'92.tick', 'acc: 12/15')
(60, u'47.headphone', 'acc: 12/13')
(61, u'24.crayfish', 'acc: 15/21')
(62, u'97.wheelchair', 'acc: 17/18')
(63, u'27.cup', 'acc: 15/18')
(64, u'25.crocodile', 'acc: 14/15')
(65, u'2.anchor', 'acc: 7/13')
(66, u'19.chair', 'acc: 17/19')
(67, u'39.ferry', 'acc: 21/21')
(68, u'60.lotus', 'acc: 16/20')
(69, u'13.butterfly', 'acc: 26/28')
(70, u'34.emu', 'acc: 14/16')
(71, u'64.metronome', 'acc: 10/10')
(72, u'82.scorpion', 'acc: 24/26')
(73, u'7.beaver', 'acc: 12/14')
(74, u'48.hedgehog', 'acc: 16/17')
(75, u'37.Faces', 'acc: 131/131')
(76, u'45.grand_piano', 'acc: 30/30')
(77, u'79.saxophone', 'acc: 11/12')
(78, u'26.crocodile_head', 'acc: 9/16')
(79, u'80.schooner', 'acc: 15/19')
(80, u'93.trilobite', 'acc: 26/26')
(81, u'28.dalmatian', 'acc: 21/21')
(82, u'10.brain', 'acc: 28/30')
(83, u'61.mandolin', 'acc: 10/13')
(84, u'11.brontosaurus', 'acc: 11/13')
(85, u'63.menorah', 'acc: 25/27')
(86, u'85.soccer_ball', 'acc: 20/20')
(87, u'51.inline_skate', 'acc: 9/10')
(88, u'71.panda', 'acc: 11/12')
(89, u'53.kangaroo', 'acc: 24/26')
(90, u'99.windsor_chair', 'acc: 16/17')
(91, u'42.garfield', 'acc: 11/11')
(92, u'29.dollar_bill', 'acc: 16/16')
(93, u'20.chandelier', 'acc: 30/33')
(94, u'96.water_lilly', 'acc: 6/12')
(95, u'41.flamingo_head', 'acc: 13/14')
(96, u'73.pizza', 'acc: 13/16')
(97, u'21.cougar_body', 'acc: 15/15')
(98, u'75.pyramid', 'acc: 16/18')
(99, u'69.okapi', 'acc: 12/12')
(100, u'15.cannon', 'acc: 11/13')
(101, u'32.electric_guitar', 'acc: 19/23')
----------------------------------------------------
('top-5 all acc:', '2759/2792', 0.9881805157593123)
(0, u'62.mayfly', 'acc: 12/12')
(1, u'66.Motorbikes', 'acc: 240/240')
(2, u'68.octopus', 'acc: 11/11')
(3, u'94.umbrella', 'acc: 23/23')
(4, u'90.strawberry', 'acc: 11/11')
(5, u'86.stapler', 'acc: 14/14')
(6, u'83.sea_horse', 'acc: 16/18')
(7, u'72.pigeon', 'acc: 14/14')
(8, u'89.stop_sign', 'acc: 20/20')
(9, u'4.BACKGROUND_Google', 'acc: 141/141')
(10, u'22.cougar_face', 'acc: 19/21')
(11, u'81.scissors', 'acc: 11/12')
(12, u'100.wrench', 'acc: 10/12')
(13, u'57.Leopards', 'acc: 60/60')
(14, u'46.hawksbill', 'acc: 30/30')
(15, u'30.dolphin', 'acc: 20/20')
(16, u'9.bonsai', 'acc: 39/39')
(17, u'35.euphonium', 'acc: 20/20')
(18, u'44.gramophone', 'acc: 16/16')
(19, u'74.platypus', 'acc: 9/11')
(20, u'14.camera', 'acc: 15/15')
(21, u'55.lamp', 'acc: 18/19')
(22, u'38.Faces_easy', 'acc: 131/131')
(23, u'54.ketch', 'acc: 34/35')
(24, u'33.elephant', 'acc: 20/20')
(25, u'3.ant', 'acc: 10/13')
(26, u'49.helicopter', 'acc: 27/27')
(27, u'36.ewer', 'acc: 26/26')
(28, u'78.rooster', 'acc: 15/15')
(29, u'70.pagoda', 'acc: 15/15')
(30, u'58.llama', 'acc: 24/24')
(31, u'5.barrel', 'acc: 15/15')
(32, u'101.yin_yang', 'acc: 18/18')
(33, u'18.cellphone', 'acc: 18/18')
(34, u'59.lobster', 'acc: 13/13')
(35, u'17.ceiling_fan', 'acc: 14/15')
(36, u'16.car_side', 'acc: 37/37')
(37, u'50.ibis', 'acc: 24/24')
(38, u'76.revolver', 'acc: 25/25')
(39, u'84.snoopy', 'acc: 10/11')
(40, u'87.starfish', 'acc: 26/26')
(41, u'12.buddha', 'acc: 25/26')
(42, u'52.joshua_tree', 'acc: 20/20')
(43, u'43.gerenuk', 'acc: 11/11')
(44, u'65.minaret', 'acc: 23/23')
(45, u'91.sunflower', 'acc: 26/26')
(46, u'56.laptop', 'acc: 25/25')
(47, u'77.rhino', 'acc: 18/18')
(48, u'1.airplanes', 'acc: 240/240')
(49, u'88.stegosaurus', 'acc: 18/18')
(50, u'23.crab', 'acc: 22/22')
(51, u'8.binocular', 'acc: 10/10')
(52, u'31.dragonfly', 'acc: 20/21')
(53, u'6.bass', 'acc: 16/17')
(54, u'95.watch', 'acc: 72/72')
(55, u'0.accordion', 'acc: 17/17')
(56, u'98.wild_cat', 'acc: 11/11')
(57, u'67.nautilus', 'acc: 17/17')
(58, u'40.flamingo', 'acc: 21/21')
(59, u'92.tick', 'acc: 13/15')
(60, u'47.headphone', 'acc: 12/13')
(61, u'24.crayfish', 'acc: 21/21')
(62, u'97.wheelchair', 'acc: 18/18')
(63, u'27.cup', 'acc: 16/18')
(64, u'25.crocodile', 'acc: 15/15')
(65, u'2.anchor', 'acc: 12/13')
(66, u'19.chair', 'acc: 19/19')
(67, u'39.ferry', 'acc: 21/21')
(68, u'60.lotus', 'acc: 19/20')
(69, u'13.butterfly', 'acc: 27/28')
(70, u'34.emu', 'acc: 16/16')
(71, u'64.metronome', 'acc: 10/10')
(72, u'82.scorpion', 'acc: 26/26')
(73, u'7.beaver', 'acc: 14/14')
(74, u'48.hedgehog', 'acc: 17/17')
(75, u'37.Faces', 'acc: 131/131')
(76, u'45.grand_piano', 'acc: 30/30')
(77, u'79.saxophone', 'acc: 12/12')
(78, u'26.crocodile_head', 'acc: 14/16')
(79, u'80.schooner', 'acc: 19/19')
(80, u'93.trilobite', 'acc: 26/26')
(81, u'28.dalmatian', 'acc: 21/21')
(82, u'10.brain', 'acc: 30/30')
(83, u'61.mandolin', 'acc: 13/13')
(84, u'11.brontosaurus', 'acc: 13/13')
(85, u'63.menorah', 'acc: 25/27')
(86, u'85.soccer_ball', 'acc: 20/20')
(87, u'51.inline_skate', 'acc: 10/10')
(88, u'71.panda', 'acc: 12/12')
(89, u'53.kangaroo', 'acc: 26/26')
(90, u'99.windsor_chair', 'acc: 17/17')
(91, u'42.garfield', 'acc: 11/11')
(92, u'29.dollar_bill', 'acc: 16/16')
(93, u'20.chandelier', 'acc: 32/33')
(94, u'96.water_lilly', 'acc: 12/12')
(95, u'41.flamingo_head', 'acc: 14/14')
(96, u'73.pizza', 'acc: 16/16')
(97, u'21.cougar_body', 'acc: 15/15')
(98, u'75.pyramid', 'acc: 18/18')
(99, u'69.okapi', 'acc: 12/12')
(100, u'15.cannon', 'acc: 12/13')
(101, u'32.electric_guitar', 'acc: 23/23')
That is everything about using the Keras pretrained ResNet50 model for image classification; I hope it can serve as a useful reference.