香港科技大学TensorFlow三天速成2(教学PPT)_第1页
香港科技大学TensorFlow三天速成2(教学PPT)_第2页
香港科技大学TensorFlow三天速成2(教学PPT)_第3页
香港科技大学TensorFlow三天速成2(教学PPT)_第4页
香港科技大学TensorFlow三天速成2(教学PPT)_第5页
已阅读5页,还剩119页未读 继续免费阅读

下载本文档

版权说明:本文档由用户提供并上传,收益归属内容提供方,若内容存在侵权,请进行举报或认领

文档简介

TensorFlow UST, Day 2: CNN — Sung Kim

[Slide 2] Gifts from Google and Line

[Slide 3] TensorFlow Mechanics:
1. Build graph using TensorFlow operations
2. Feed data and run graph (operation): sess.run(op, feed_dict={x: x_data})
3. Update variables in the graph (and return values)

[Slide 4] Machine Learning Basics: Linear Regression; Logistic Regression (binary classification); Softmax Classification; Neural Networks

[Slide 5] Linear Regression — Gradient Descent:
    W = tf.Variable(tf.random_normal([1]), name='weight')
    b = tf.Variable(tf.random_normal([1]), name='bias')
    # Our hypothesis: XW + b
    hypothesis = X * W + b
    # cost/loss function
    cost = tf.reduce_mean(tf.square(hypothesis - y_train))
    # Minimize
    train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

[Slides 6–7] Logistic Regression — Model? (Hypothesis?) Cost? Gradient Descent:
    hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
    # cost/loss function
    cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
    train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

[Slides 8–9] Softmax Classifier — Model? (Hypothesis?) Cost? Gradient Descent:
    logits = tf.matmul(X, W) + b
    hypothesis = tf.nn.softmax(logits)
    # Cross entropy cost/loss
    cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
    # Equivalent, using the built-in op:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
    train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

[Slide 10] (Deep) Neural Nets — NN for MNIST:
    # input placeholders
    X = tf.placeholder(tf.float32, [None, 784])
    Y = tf.placeholder(tf.float32, [None, 10])
    # weights & bias for nn layers
    W1 = tf.Variable(tf.random_normal([784, 256]))
    b1 = tf.Variable(tf.random_normal([256]))
    L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
    W2 = tf.Variable(tf.random_normal([256, 256]))
    b2 = tf.Variable(tf.random_normal([256]))
    L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b3 = tf.Variable(tf.random_normal([10]))
    hypothesis = tf.matmul(L2, W3) + b3
    # define cost/loss & optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

[Slides 11, 14, 17, 28] NN tips: Initializing weights; Activation functions; Regularization; Optimizers

[Slide 12] Random? W = tf.Variable(tf.random_normal([1]), name='weight')

[Slide 13: figure-only slide]

[Slides 15–16] Activation functions: tf.nn.relu, tf.tanh, tf.sigmoid

[Slide 18] Overfitting

[Slide 19] Am I overfitting? Very high accuracy on the training dataset (e.g. 0.99); poor accuracy on the test dataset (0.85)

[Slide 20] Solutions for overfitting: more training data!; reduce the number of features; regularization

[Slides 21–22] Regularization: let's not have too big numbers in the weights (add a weight penalty term to the cost)

[Slide 23] "Dropout: A Simple Way to Prevent Neural Networks from Overfitting", Srivastava et al., 2014

[Slides 24–25: figure-only slides]

[Slide 26] TensorFlow implementation (dropout):
    keep_prob = tf.placeholder("float")
    L1 = L1_d = tf.nn.dropout(L1, keep_prob=keep_prob)
    # TRAIN:
    sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, keep_prob: 0.7})
    # EVALUATION:
    print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

[Slide 27] Dropout for MNIST:
    # dropout (keep_prob) rate 0.7 on training, but should be 1 for testing
    keep_prob = tf.placeholder(tf.float32)
    W1 = tf.get_variable("W1", shape=[784, 512])
    b1 = tf.Variable(tf.random_normal([512]))
    L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
    L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
    W2 = tf.get_variable("W2", shape=[512, 512])
    b2 = tf.Variable(tf.random_normal([512]))
    L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
    L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
    # train my model
    for epoch in range(training_epochs):
        ...
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
            c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
            avg_cost += c / total_batch
    # Test model and check accuracy
    correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

[Slides 29–30] Optimizers (/api_guides/python/train):
    train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
    # Also available: tf.train.AdadeltaOptimizer, tf.train.AdagradOptimizer,
    # tf.train.AdagradDAOptimizer, tf.train.MomentumOptimizer, tf.train.AdamOptimizer,
    # tf.train.FtrlOptimizer, tf.train.ProximalGradientDescentOptimizer,
    # tf.train.ProximalAdagradOptimizer, tf.train.RMSPropOptimizer

[Slide 31] "ADAM: A Method for Stochastic Optimization", Kingma et al., 2015

[Slide 32] Use AdamOptimizer:
    # define cost/loss & optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

[Slide 33] TensorFlow UST, Day 2: CNN — Sung Kim

[Slides 34–65: figure-only slides, no extracted text]

[Slide 66] "Convolutional Neural Networks for Sentence Classification", Yoon Kim, 2014

[Slide 67] CNN Basics — Sung Kim, code with TF 1.0!

[Slide 68] CNN (architecture figure: http://parse.ele.tue.nl/cluster/2/CNNArchitecture.jpg)

[Slide 69] CNN for CT images — Asan Medical Center & Microsoft Medical Big-data Contest, winner by Geun Young Lee and Alex Kim

[Slide 70] Convolution layer and max pooling

[Slide 71] Simple convolution layer — stride: 1x1; 3x3x1 image; 2x2x1 filter

[Slide 72] Toy image

[Slides 73–74] Simple convolution layer — image: [1, 3, 3, 1]; filter: [2, 2, 1, 1]; stride: 1x1; padding: 'VALID'; filter weights all 1.0, shape = (2, 2, 1, 1)

[Slides 75–76] Simple convolution layer — image: [1, 3, 3, 1]; filter: [2, 2, 1, 1]; stride: 1x1; padding: 'SAME'

[Slide 77: figure-only slide]

[Slide 78] Max Pooling

[Slide 79] MNIST image loading

[Slide 80] MNIST convolution layer

[Slide 81] MNIST max pooling

[Slide 82] CNN MNIST 99% — Sung Kim, code with TF 1.0!

[Slide 83] CNN (architecture figure: http://parse.ele.tue.nl/cluster/2/CNNArchitecture.jpg)

[Slide 84] Simple CNN

[Slide 85] Conv layer 1:
    # input placeholders
    X = tf.placeholder(tf.float32, [None, 784])
    X_img = tf.reshape(X, [-1, 28, 28, 1])  # img 28x28x1 (black/white)
    Y = tf.placeholder(tf.float32, [None, 10])
    # L1 Img In shape = (?, 28, 28, 1)
    W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
    # Conv -> (?, 28, 28, 32)
    # Pool -> (?, 14, 14, 32)
    L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
    L1 = tf.nn.relu(L1)
    L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Tensor("Conv2D:0", shape=(?, 28, 28, 32), dtype=float32)
    # Tensor("Relu:0", shape=(?, 28, 28, 32), dtype=float32)
    # Tensor("MaxPool:0", shape=(?, 14, 14, 32), dtype=float32)

[Slide 86] Conv layer 2:
    # L2 Img In shape = (?, 14, 14, 32)
    W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
    # Conv -> (?, 14, 14, 64)
    # Pool -> (?, 7, 7, 64)
    L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
    L2 = tf.nn.relu(L2)
    L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    L2 = tf.reshape(L2, [-1, 7 * 7 * 64])
    # Tensor("Conv2D_1:0", shape=(?, 14, 14, 64), dtype=float32)
    # Tensor("Relu_1:0", shape=(?, 14, 14, 64), dtype=float32)
    # Tensor("MaxPool_1:0", shape=(?, 7, 7, 64), dtype=float32)
    # Tensor("Reshape_1:0", shape=(?, 3136), dtype=float32)

[Slide 87] Fully Connected (FC, Dense) layer:
    L2 = tf.reshape(L2, [-1, 7 * 7 * 64])
    # Final FC 7x7x64 inputs -> 10 outputs
    W3 = tf.get_variable("W3", shape=[7 * 7 * 64, 10], initializer=tf.contrib.layers.xavier_initializer())
    b = tf.Variable(tf.random_normal([10]))
    hypothesis = tf.matmul(L2, W3) + b
    # define cost/loss & optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

[Slides 88–89] Training and Evaluation:
    # initialize
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # train my model
    print('Learning started. It takes some time.')
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            feed_dict = {X: batch_xs, Y: batch_ys}
            c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
            avg_cost += c / total_batch
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
    print('Learning Finished!')
    # Test model and check accuracy
    correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('Accuracy:', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
    # Sample output: Epoch: 0001 cost = 0.340291267 ... Epoch: 0015 cost = 0.009469977
    # Learning Finished! Accuracy: 0.9885

[Slide 90] Deep CNN (image credit: ...hk/ccloy/project_target_code/index.html)

[Slides 91–92] Deep CNN — conv layers L1–L3 with dropout, FC layers L4–L5:
    # L1 Img In shape = (?, 28, 28, 1)
    W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
    # Conv -> (?, 28, 28, 32); Pool -> (?, 14, 14, 32)
    L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
    L1 = tf.nn.relu(L1)
    L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
    # L2 Img In shape = (?, 14, 14, 32)
    W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
    # Conv -> (?, 14, 14, 64); Pool -> (?, 7, 7, 64)
    L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
    L2 = tf.nn.relu(L2)
    L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
    # L3 Img In shape = (?, 7, 7, 64)
    W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
    # Conv -> (?, 7, 7, 128); Pool -> (?, 4, 4, 128)
    # Reshape -> (?, 4*4*128)  # Flatten them for FC
    L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
    L3 = tf.nn.relu(L3)
    L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
    L3 = tf.reshape(L3, [-1, 128 * 4 * 4])
    # L4 FC 4x4x128 inputs -> 625 outputs
    W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625], initializer=tf.contrib.layers.xavier_initializer())
    b4 = tf.Variable(tf.random_normal([625]))
    L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
    L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
    # L5 Final FC 625 inputs -> 10 outputs
    W5 = tf.get_variable("W5", shape=[625, 10], initializer=tf.contrib.layers.xavier_initializer())
    b5 = tf.Variable(tf.random_normal([10]))
    hypothesis = tf.matmul(L4, W5) + b5
    # Test model and check accuracy
    correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('Accuracy:', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))
    # Sample output: ... Epoch: 0015 cost = 0.024607201, Learning Finished! Accuracy: 0.9938

[Slide 93] Class, Layers, Ensemble — Sung Kim, code with TF 1.0!

[Slide 94] CNN (recap of the deep CNN code and the 0.9938 accuracy result)

[Slide 95] Python Class:
    class Model:
        def __init__(self, sess, name):
            self.sess = sess
            self.name = name
            self._build_net()

        def _build_net(self):
            with tf.variable_scope(self.name):
                # input placeholders
                self.X = tf.placeholder(tf.float32, [None, 784])
                # img 28x28x1 (black/white)
                X_img = tf.reshape(self.X, [-1, 28, 28, 1])
                self.Y = tf.placeholder(tf.float32, [None, 10])
                # L1 Img In shape = (?, 28, 28, 1)
                W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
                ...

        def predict(self, x_test, keep_prop=1.0):
            return self.sess.run(self.logits, feed_dict={self.X: x_test, self.keep_prob: keep_prop})

        def get_accuracy(self, x_test, y_test, keep_prop=1.0):
            return self.sess.run(self.accuracy, feed_dict={self.X: x_test, self.Y: y_test, self.keep_prob: keep_prop})

        def train(self, x_data, y_data, keep_prop=0.7):
            return self.sess.run([self.cost, self.optimizer], feed_dict={self.X: x_data, self.Y: y_data, self.keep_prob: keep_prop})

    # initialize
    sess = tf.Session()
    m1 = Model(sess, "m1")
    sess.run(tf.global_variables_initializer())
    print('Learning Started!')
    # train my model
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            c, _ = m1.train(batch_xs, batch_ys)
            avg_cost += c / total_batch

[Slide 96] tf.layers (/api_docs/python/tf/layers)

[Slide 97] tf.layers — the low-level tf.nn code vs. the tf.layers equivalent:
    # low-level version:
    # L1 Img In shape = (?, 28, 28, 1)
    W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
    # Conv -> (?, 28, 28, 32); Pool -> (?, 14, 14, 32)
    L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
    L1 = tf.nn.relu(L1)
    L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    L1 = tf.nn.dropout(L1, keep_prob=self.keep_prob)
    # L2 Img In shape = (?, 14, 14, 32)
    W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
    # tf.layers version:
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], padding='SAME', strides=2)
    dropout1 = tf.layers.dropout(inputs=pool1, rate=0.7, training=self.training)
    # Convolutional Layer #2
    conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)
    flat = tf.reshape(dropout3, [-1, ...  # [extracted text truncated here in source]

温馨提示

  • 1. 本站所有资源如无特殊说明,都需要本地电脑安装OFFICE2007和PDF阅读器。图纸软件为CAD,CAXA,PROE,UG,SolidWorks等.压缩文件请下载最新的WinRAR软件解压。
  • 2. 本站的文档不包含任何第三方提供的附件图纸等,如果需要附件,请联系上传者。文件的所有权益归上传用户所有。
  • 3. 本站RAR压缩包中若带图纸,网页内容里面会有图纸预览,若没有图纸预览就没有图纸。
  • 4. 未经权益所有人同意不得将文件中的内容挪作商业或盈利用途。
  • 5. 人人文库网仅提供信息存储空间,仅对用户上传内容的表现方式做保护处理,对用户上传分享的文档内容本身不做任何修改或编辑,并不能对任何下载内容负责。
  • 6. 下载文件中如有侵权或不适当内容,请与我们联系,我们立即纠正。
  • 7. 本站不保证下载资源的准确性、安全性和完整性, 同时也不承担用户因使用这些下载资源对自己和他人造成任何形式的伤害或损失。

评论

0/150

提交评论