Without further ado, let's go straight to the code.
import keras
from keras.layers import Input, Dense, Conv2D
from keras.layers import MaxPooling2D, Flatten, Convolution2D
from keras.models import Model
import os
import numpy as np
from PIL import Image
from keras.optimizers import SGD
from scipy import misc

root_path = os.getcwd()
train_names = ['bear','blackswan','bus','camel','car','cows','dance','dog','hike','hoc','kite','lucia','mallerd','pigs','soapbox','stro','surf','swing','train','walking']
test_names = ['boat','dance-jump','drift-turn','elephant','libby']

def load_data(seq_names, data_number, seq_len):
    # Generate image pairs
    print('loading data.....')
    frame_num = 51
    train_data1 = []
    train_data2 = []
    train_lab = []
    count = 0
    while count < data_number:
        count = count + 1
        pos_neg = np.random.randint(0, 2)
        if pos_neg == 0:
            # Negative pair: two frames drawn from different sequences
            seed1 = np.random.randint(0, seq_len)
            seed2 = np.random.randint(0, seq_len)
            while seed1 == seed2:
                seed1 = np.random.randint(0, seq_len)
                seed2 = np.random.randint(0, seq_len)
            frame1 = np.random.randint(1, frame_num)
            frame2 = np.random.randint(1, frame_num)
            path1 = os.path.join(root_path, 'data', 'simility_data', seq_names[seed1], str(frame1) + '.jpg')
            path2 = os.path.join(root_path, 'data', 'simility_data', seq_names[seed2], str(frame2) + '.jpg')
            # misc.imresize was removed in SciPy 1.3, so this requires an older SciPy
            image1 = np.array(misc.imresize(Image.open(path1), [224, 224]))
            image2 = np.array(misc.imresize(Image.open(path2), [224, 224]))
            train_data1.append(image1)
            train_data2.append(image2)
            train_lab.append(np.array(0))
        else:
            # Positive pair: two frames from the same sequence
            seed = np.random.randint(0, seq_len)
            frame1 = np.random.randint(1, frame_num)
            frame2 = np.random.randint(1, frame_num)
            path1 = os.path.join(root_path, 'data', 'simility_data', seq_names[seed], str(frame1) + '.jpg')
            path2 = os.path.join(root_path, 'data', 'simility_data', seq_names[seed], str(frame2) + '.jpg')
            image1 = np.array(misc.imresize(Image.open(path1), [224, 224]))
            image2 = np.array(misc.imresize(Image.open(path2), [224, 224]))
            train_data1.append(image1)
            train_data2.append(image2)
            train_lab.append(np.array(1))
    return np.array(train_data1), np.array(train_data2), np.array(train_lab)

def vgg_16_base(input_tensor):
    net = Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(224, 224, 3))(input_tensor)
    net = Convolution2D(64, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)
    net = Convolution2D(128, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(128, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)
    net = Convolution2D(256, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(256, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(256, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)
    net = Convolution2D(512, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(512, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(512, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)
    net = Convolution2D(512, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(512, (3, 3), activation='relu', padding='same')(net)
    net = Convolution2D(512, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)
    net = Flatten()(net)
    return net

def siamese(vgg_path=None, siamese_path=None):
    input_tensor = Input(shape=(224, 224, 3))
    vgg_model = Model(input_tensor, vgg_16_base(input_tensor))
    if vgg_path:
        vgg_model.load_weights(vgg_path)
    input_im1 = Input(shape=(224, 224, 3))
    input_im2 = Input(shape=(224, 224, 3))
    # The two branches share the same VGG weights
    out_im1 = vgg_model(input_im1)
    out_im2 = vgg_model(input_im2)
    # Element-wise difference of the two embeddings
    diff = keras.layers.subtract([out_im1, out_im2])
    out = Dense(500, activation='relu')(diff)
    out = Dense(1, activation='sigmoid')(out)
    model = Model([input_im1, input_im2], out)
    if siamese_path:
        model.load_weights(siamese_path)
    return model

train = True
if train:
    model = siamese(siamese_path='model/simility/vgg.h5')
    sgd = SGD(lr=1e-6, momentum=0.9, decay=1e-6, nesterov=True)
    model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])
    tensorboard = keras.callbacks.TensorBoard(histogram_freq=5, log_dir='log/simility',
                                              write_grads=True, write_images=True)
    ckpt = keras.callbacks.ModelCheckpoint(os.path.join(root_path, 'model', 'simility', 'vgg.h5'),
                                           verbose=1, period=5)
    train_data1, train_data2, train_lab = load_data(train_names, 4000, 20)
    model.fit([train_data1, train_data2], train_lab, callbacks=[tensorboard, ckpt],
              batch_size=64, epochs=50)
else:
    model = siamese(siamese_path='model/simility/vgg.h5')
    test_im1, test_im2, test_labe = load_data(test_names, 1000, 5)
    TP = 0
    for i in range(1000):
        im1 = np.expand_dims(test_im1[i], axis=0)
        im2 = np.expand_dims(test_im2[i], axis=0)
        lab = test_labe[i]
        pre = model.predict([im1, im2])
        if pre > 0.9 and lab == 1:
            TP = TP + 1
        if pre < 0.9 and lab == 0:
            TP = TP + 1
    print(float(TP) / 1000)
The network takes two images as input; a label of 1 means the pair is similar, 0 means dissimilar.
The loss function is plain mean squared error for now; it should eventually be replaced with the contrastive loss commonly used for Siamese networks.
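For reference, here is a minimal sketch of that contrastive loss, assuming the Siamese head is changed to output the Euclidean distance between the two VGG embeddings rather than a sigmoid score; the margin of 1.0 is an arbitrary choice here, not something from the original script.

from keras import backend as K

def contrastive_loss(y_true, y_pred, margin=1.0):
    # y_true: 1 for similar pairs, 0 for dissimilar pairs
    # y_pred: assumed here to be the Euclidean distance between the two embeddings
    pos = y_true * K.square(y_pred)
    neg = (1.0 - y_true) * K.square(K.maximum(margin - y_pred, 0.0))
    return K.mean(pos + neg)

# model.compile(optimizer=sgd, loss=contrastive_loss, metrics=['accuracy'])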
Summary:
1. I tested on several randomly generated sets of 1000 image pairs; test accuracy was around 0.7, which is only mediocre.
2. Problems:
1) Data loading does not use a generator yet; I still need to read the documentation more carefully (a minimal generator sketch is given after this list).
2) When a validation set is split off during training, training fails with an input-dimension error; I have not found the cause yet.
3) The input shape apparently has to be given as concrete numbers. I originally wanted to use shape=input_tensor.get_shape(); training worked, but the model could not be saved, raising a (Not JSON Serializable, Dimension(None)) type error.
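A minimal sketch of what such a pair generator could look like, assuming the same data/simility_data/<sequence>/<frame>.jpg layout as load_data above; the name pair_generator is hypothetical, and PIL's resize is used instead of the deprecated misc.imresize.

import os
import numpy as np
from PIL import Image

def pair_generator(seq_names, seq_len, batch_size=64, frame_num=51):
    # Yields batches of ([images1, images2], labels) forever, for fit_generator
    while True:
        batch1, batch2, labels = [], [], []
        for _ in range(batch_size):
            if np.random.randint(0, 2) == 0:
                # negative pair: frames from two different sequences
                s1, s2 = np.random.choice(seq_len, 2, replace=False)
                label = 0
            else:
                # positive pair: two frames from the same sequence
                s1 = s2 = np.random.randint(0, seq_len)
                label = 1
            f1, f2 = np.random.randint(1, frame_num, size=2)
            p1 = os.path.join(root_path, 'data', 'simility_data', seq_names[s1], str(f1) + '.jpg')
            p2 = os.path.join(root_path, 'data', 'simility_data', seq_names[s2], str(f2) + '.jpg')
            batch1.append(np.array(Image.open(p1).resize((224, 224))))
            batch2.append(np.array(Image.open(p2).resize((224, 224))))
            labels.append(label)
        yield [np.array(batch1), np.array(batch2)], np.array(labels)

# model.fit_generator(pair_generator(train_names, 20), steps_per_epoch=4000 // 64, epochs=50)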
Bonus: a Keras Siamese network for question-answer text matching with an RNN (data included)
Purpose:
This post explains how to build a simple matching network, using Keras's Lambda layer. Before building the network, the data needs to be preprocessed; afterwards, each sentence is a sequence of word ids. A question and an answer are encoded separately into two vectors, and the matching layer compares the two vectors to compute a similarity.
Network diagram:
Data preparation:
The data is based on Taobao customer-service dialogues found online; I will also put it on my download page. The raw data consists of conversations, from which I kept only those labeled 1 and then split each conversation into QA pairs, where q is the customer and a is the agent. For each q, its matching a is given label 1; another a is then picked to form a negative sample with label 0.
Hyperparameters:
These are fairly simple; see the code for details.
# dialogue max pair q,a
max_pair = 30000
# top k frequent words
MAX_FEATURES = 450
# fixed q,a length
MAX_SENTENCE_LENGTH = 30
embedding_size = 100
batch_size = 600
# learning rate
lr = 0.01
HIDDEN_LAYER_SIZE = n_hidden_units = 256  # neurons in hidden layer
Details:
Import the libraries:
# -*- coding: utf-8 -*-
from keras.layers.core import Activation, Dense, Dropout, SpatialDropout1D
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.preprocessing import sequence
from sklearn.model_selection import train_test_split
import collections
import matplotlib.pyplot as plt
import nltk
import numpy as np
import os
import pandas as pd
from alime_data import convert_dialogue_to_pair
from parameter import MAX_SENTENCE_LENGTH, MAX_FEATURES, embedding_size, max_pair, batch_size, HIDDEN_LAYER_SIZE

DATA_DIR = "../data"
NUM_EPOCHS = 2
# Read training data and generate vocabulary
maxlen = 0
num_recs = 0
Data preparation: first count word frequencies, keep the top N most frequent words, and convert each sentence into a sequence of word ids. The valid ids are pushed to the right of each sequence and the left side is zero-padded; the data is then split into training and test sets.
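As a quick illustration of the left padding (a toy example, not part of the original script):

from keras.preprocessing import sequence
# pad_sequences pads on the left by default (padding='pre'), so the word ids end up right-aligned
sequence.pad_sequences([[5, 8, 2]], maxlen=6)
# -> array([[0, 0, 0, 5, 8, 2]], dtype=int32)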
word_freqs = collections.Counter()
training_data = convert_dialogue_to_pair(max_pair)
num_recs = len([1 for r in training_data.iterrows()])

# First pass: build the word-frequency counter over both questions and answers
for line in training_data.iterrows():
    label, sentence_q = line[1]['label'], line[1]['sentence_q']
    label, sentence_a = line[1]['label'], line[1]['sentence_a']
    words = nltk.word_tokenize(sentence_q.lower())
    if len(words) > maxlen:
        maxlen = len(words)
    for word in words:
        word_freqs[word] += 1
    words = nltk.word_tokenize(sentence_a.lower())
    if len(words) > maxlen:
        maxlen = len(words)
    for word in words:
        word_freqs[word] += 1

## Get some information about our corpus
# 1 is UNK, 0 is PAD
# We take MAX_FEATURES-1 features to account for PAD
vocab_size = min(MAX_FEATURES, len(word_freqs)) + 2
word2index = {x[0]: i + 2 for i, x in enumerate(word_freqs.most_common(MAX_FEATURES))}
word2index["PAD"] = 0
word2index["UNK"] = 1
index2word = {v: k for k, v in word2index.items()}

# convert sentences to sequences
X_q = np.empty((num_recs, ), dtype=list)
X_a = np.empty((num_recs, ), dtype=list)
y = np.zeros((num_recs, ))
i = 0

def chinese_split(x):
    # The data is already space-separated, so a simple split is enough
    return x.split(' ')

for line in training_data.iterrows():
    label, sentence_q, sentence_a = line[1]['label'], line[1]['sentence_q'], line[1]['sentence_a']
    words = chinese_split(sentence_q)
    seqs = []
    for word in words:
        if word in word2index.keys():
            seqs.append(word2index[word])
        else:
            seqs.append(word2index["UNK"])
    X_q[i] = seqs
    words = chinese_split(sentence_a)
    seqs = []
    for word in words:
        if word in word2index.keys():
            seqs.append(word2index[word])
        else:
            seqs.append(word2index["UNK"])
    X_a[i] = seqs
    y[i] = int(label)
    i += 1

# Pad the sequences (left padded with zeros)
X_a = sequence.pad_sequences(X_a, maxlen=MAX_SENTENCE_LENGTH)
X_q = sequence.pad_sequences(X_q, maxlen=MAX_SENTENCE_LENGTH)
X = []
for i in range(len(X_a)):
    concat = [X_q[i], X_a[i]]
    X.append(concat)

# Split input into training and test
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=42)
Xtrain_Q = [e[0] for e in Xtrain]
Xtrain_A = [e[1] for e in Xtrain]
Xtest_Q = [e[0] for e in Xtest]
Xtest_A = [e[1] for e in Xtest]
Finally, build the network. Two functions are defined first: a sentence encoder, and a Lambda layer that computes the element-wise absolute difference between two vectors. Q and A are each passed through the encoder to get two vectors, which are fed into the Lambda layer. The result is a layer of size 2*hidden_size (the encoder is bidirectional); it is followed by a Dense layer and an activation to produce the classification probability.
from keras.layers.wrappers import Bidirectional
from keras.layers import Input, Lambda
from keras.models import Model

def encoder(inputs_seqs, rnn_hidden_size, dropout_rate):
    # Embed the id sequence, apply dropout, and encode with a bidirectional LSTM
    x_embed = Embedding(vocab_size, embedding_size, input_length=MAX_SENTENCE_LENGTH)(inputs_seqs)
    inputs_drop = SpatialDropout1D(0.2)(x_embed)
    encoded_Q = Bidirectional(
        LSTM(rnn_hidden_size, dropout=dropout_rate, recurrent_dropout=dropout_rate, name='RNN'))(inputs_drop)
    return encoded_Q

def absolute_difference(vecs):
    # Element-wise absolute difference between the two encoded vectors
    a, b = vecs
    return abs(a - b)

inputs_Q = Input(shape=(MAX_SENTENCE_LENGTH,), name="input")
inputs_A = Input(shape=(MAX_SENTENCE_LENGTH,), name="input_a")
encoded_Q = encoder(inputs_Q, HIDDEN_LAYER_SIZE, 0.1)
encoded_A = encoder(inputs_A, HIDDEN_LAYER_SIZE, 0.1)

similarity = Lambda(absolute_difference)([encoded_Q, encoded_A])
# An alternative matching layer (kept from the original draft):
# x = concatenate([encoded_Q, encoded_A])
# matching_x = Dense(128)(x)
# matching_x = Activation("sigmoid")(matching_x)
polar = Dense(1)(similarity)
prop = Activation("sigmoid")(polar)

model = Model(inputs=[inputs_Q, inputs_A], outputs=prop)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
training_history = model.fit([Xtrain_Q, Xtrain_A], ytrain, batch_size=batch_size,
                             epochs=NUM_EPOCHS,
                             validation_data=([Xtest_Q, Xtest_A], ytest))

# plot loss and accuracy
def plot(training_history):
    plt.subplot(211)
    plt.title("Accuracy")
    plt.plot(training_history.history["acc"], color="g", label="Train")
    plt.plot(training_history.history["val_acc"], color="b", label="Validation")
    plt.legend(loc="best")
    plt.subplot(212)
    plt.title("Loss")
    plt.plot(training_history.history["loss"], color="g", label="Train")
    plt.plot(training_history.history["val_loss"], color="b", label="Validation")
    plt.legend(loc="best")
    plt.tight_layout()
    plt.show()

# evaluate
score, acc = model.evaluate([Xtest_Q, Xtest_A], ytest, batch_size=batch_size)
print("Test score: %.3f, accuracy: %.3f" % (score, acc))

# Print a few random predictions alongside the true labels and the decoded sentences
for i in range(25):
    idx = np.random.randint(len(Xtest_Q))
    xtest_Q = Xtest_Q[idx].reshape(1, MAX_SENTENCE_LENGTH)
    xtest_A = Xtest_A[idx].reshape(1, MAX_SENTENCE_LENGTH)
    ylabel = ytest[idx]
    ypred = model.predict([xtest_Q, xtest_A])[0][0]
    sent_Q = " ".join([index2word[x] for x in xtest_Q[0].tolist() if x != 0])
    sent_A = " ".join([index2word[x] for x in xtest_A[0].tolist() if x != 0])
    print("%.0f\t%d\t%s\t%s" % (ypred, ylabel, sent_Q, sent_A))
Finally, the data-processing functions, which live in a separate file:
import nltk
from parameter import MAX_FEATURES, MAX_SENTENCE_LENGTH
import pandas as pd
from collections import Counter

def get_pair(number, dialogue):
    # Split each conversation into (question, answer) pairs, up to `number` pairs
    pairs = []
    for conversation in dialogue:
        utterances = conversation[2:].strip('\n').split('\t')
        for i, utterance in enumerate(utterances):
            if i % 2 != 0:
                continue
            # guard against conversations with an odd number of utterances
            if i + 1 >= len(utterances):
                break
            pairs.append([utterances[i], utterances[i + 1]])
        if len(pairs) >= number:
            return pairs
    return pairs

def convert_dialogue_to_pair(k):
    dialogue = open('dialogue_alibaba2.txt', encoding='utf-8', mode='r')
    dialogue = dialogue.readlines()
    dialogue = [p for p in dialogue if p.startswith('1')]
    print(len(dialogue))
    pairs = get_pair(k, dialogue)
    data = []
    # Positive samples: each question with its own answer
    for p in pairs:
        data.append([p[0], p[1], 1])
    # Negative samples: each question with an answer shifted 8 positions away
    for i, p in enumerate(pairs):
        data.append([p[0], pairs[(i + 8) % len(pairs)][1], 0])
    df = pd.DataFrame(data, columns=['sentence_q', 'sentence_a', 'label'])
    print(len(data))
    return df
This concludes this walkthrough of computing image similarity with a Siamese network in Keras; I hope it gives you a useful reference.