%% Building a 1-D CNN
% Defines a 1-D convolutional network for sequence data: two
% conv/ReLU/layer-norm stages with causal padding, global max pooling
% over time, and a fully connected head producing one value per sequence.
filterSize = 3;   % temporal width of each 1-D convolution kernel
numFilters = 64;  % filters in the first conv stage (doubled in the second)
numClasses = 10;  % NOTE(review): unused below -- the head ends in fullyConnectedLayer(1); confirm whether a classification head was intended
layers = [ ...
    sequenceInputLayer(15, MinLength=12)   % 15 channels; sequences of length >= 12
    convolution1dLayer(filterSize,numFilters,Padding="causal")
    reluLayer
    layerNormalizationLayer
    maxPooling1dLayer(2)
    convolution1dLayer(filterSize,2*numFilters,Padding="causal")
    reluLayer
    layerNormalizationLayer
    globalMaxPooling1dLayer                % collapse the time dimension
    fullyConnectedLayer(512)
    reluLayer
    fullyConnectedLayer(128)
    reluLayer
    fullyConnectedLayer(1)                 % single output per sequence
    reluLayer];                            % final ReLU constrains the output to be non-negative
% analyzeNetwork(layers);
lgraph = layerGraph(layers);
net = dlnetwork(lgraph);
%% Using the pre-trained model or training the model
% If you want to train the model from scratch, set "train_network" to true;
% otherwise the pre-trained network is loaded from disk.
train_network = true;
if train_network
    miniBatchSize = 1;
    numEpochs = 10;
    numObservations = numel(inputTrain.Files);
    numIterationsPerEpoch = floor(numObservations./miniBatchSize);
    % Adam optimizer state; initialized empty and filled in by adamupdate.
    averageGrad = [];
    averageSqGrad = [];
    numIterations = numEpochs * numIterationsPerEpoch;
    monitor = trainingProgressMonitor(Metrics="Loss",Info="Epoch",XLabel="Iteration");
    iteration = 0;
    epoch = 0;
    while epoch < numEpochs && ~monitor.Stop
        epoch = epoch + 1;
        % Shuffle training data and rewind the validation queue each epoch.
        shuffle(mbq);
        reset(mbq_val);
        while hasdata(mbq) && ~monitor.Stop
            iteration = iteration + 1;
            % Read mini-batch of data.
            [X,T] = next(mbq);
            % Convert mini-batch of data to a dlarray.
            X = dlarray(single(X),"CBT");
            % We read each image with the size of [15 * 12 * 256 * 256] and
            % convert it to [channel_size(C) = 15, batch_size(B) = 256*256,
            % temporal_size(T) = 12]. Batch sizes above 65500 caused errors,
            % so the batch dimension is truncated to 65500.
            X = X(:,1:65500,:);
            T = T(:,1:65500,:);
            % If training on a GPU, then convert data to a gpuArray.
            if canUseGPU
                X = gpuArray(X);
                T = gpuArray(T);
            end
            % Calculate loss and gradients using the helper loss function.
            [loss,gradients] = dlfeval(@modelLoss,net,X,T);
            % Update the network parameters using the Adam optimizer.
            [net,averageGrad,averageSqGrad] = adamupdate(net,gradients,averageGrad,averageSqGrad,iteration);
            % Update the training progress monitor.
            recordMetrics(monitor,iteration,Loss=loss);
            updateInfo(monitor,Epoch=epoch + " of " + numEpochs);
            monitor.Progress = 100 * iteration/numIterations;
        end
        % Validation error: RMSE averaged over all validation mini-batches.
        ii = 0;
        rmse_error = [];  % reset per epoch so stale values are not averaged in
        while hasdata(mbq_val)
            [X_val,T_val] = next(mbq_val);
            % Format conversion is required on CPU as well, so it is done
            % unconditionally (the original only did it when a GPU was available).
            X_val = dlarray(single(X_val),"CBT");
            if canUseGPU
                X_val = gpuArray(X_val);
                T_val = gpuArray(T_val);
            end
            Y_val = predict(net,X_val);
            % RMSE = sqrt of MSE; named to avoid shadowing the built-in error().
            batchRmse = sqrt(mse(Y_val,T_val));
            ii = ii + 1;
            rmse_error(ii) = extractdata(gather(batchRmse)); %#ok<SAGROW>
            % disp(rmse_error(ii))
        end
        disp("Epoch " + epoch + " Validation Error (RMSE): " + mean(rmse_error))
    end
    save('trainedNetwork_conv1d_submit.mat','net')
else
    lgraph = load('trainedNetwork_conv1d.mat'); % Load pre-trained network
    net = lgraph.net;
end