Is there any LSTM toolbox available in MATLAB?

Hi,

I also looked for an LSTM in the MATLAB Neural Network Toolbox and couldn't find one, so I implemented it myself with the toolbox's building blocks. The readability still needs work, but here is the code:

function net1 = create_LSTM_network(input_size, before_layers, before_activation, hidden_size, after_layers, after_activations, output_size)
%% this part splits the input into two separate parts: the first part
% is the input size and the second part is the memory
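% Layer layout (relative to the N_before front layers): layers N_before+1
% through N_before+10 implement a single LSTM cell, and layer N_before+10
% (the cell output h) feeds back into the concatenation layer with a
% one-step delay.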
real_input_size = input_size;
N_before = length(before_layers);
N_after = length(after_layers);
delays_vec = 1;
if (N_before > 0) && (N_after > 0)
    input_size = before_layers(end);
    net1 = fitnet([before_layers, input_size + hidden_size, hidden_size*ones(1, 9), after_layers]);
elseif (N_before > 0) && (N_after == 0)
    input_size = before_layers(end);
    net1 = fitnet([before_layers, input_size + hidden_size, hidden_size*ones(1, 9)]);
elseif (N_before == 0) && (N_after > 0)
    net1 = fitnet([input_size + hidden_size, hidden_size*ones(1, 9), after_layers]);
else
    net1 = fitnet([input_size + hidden_size, hidden_size*ones(1, 9)]);
end
net1 = configure(net1, rand(real_input_size, 200), rand(output_size, 200));
%% concatenation
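% The concatenation layer stacks the current input x(t) on top of the
% previous cell output h(t-1), i.e. z = [x; h]. Its weights are fixed
% identity blocks, so it only routes signals.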
net1.layers{N_before+1}.name = 'Concatenation Layer';
net1.layers{N_before+2}.name = 'Forget Amount';
net1.layers{N_before+3}.name = 'Forget Gate';
net1.layers{N_before+4}.name = 'Remember Amount';
net1.layers{N_before+5}.name = 'tanh input';
net1.layers{N_before+6}.name = 'Remember Gate';
net1.layers{N_before+7}.name = 'Update Memory';
net1.layers{N_before+8}.name = 'tanh Memory';
net1.layers{N_before+9}.name = 'Combine Amount';
net1.layers{N_before+10}.name = 'Combine Gate';
net1.layerConnect(N_before+3, N_before+7) = 1;
net1.layerConnect(N_before+1, N_before+10) = 1;
net1.layerConnect(N_before+4, N_before+3) = 0;
net1.layerWeights{N_before+1, N_before+10}.delays = delays_vec;
if N_before > 0
    net1.LW{N_before+1, N_before} = [eye(input_size); zeros(hidden_size, input_size)];
else
    net1.IW{1,1} = [eye(input_size); zeros(hidden_size, input_size)];
end
net1.LW{N_before+1, N_before+10} = repmat([zeros(input_size, hidden_size); eye(hidden_size)], [1, size(delays_vec, 2)]);
net1.layers{N_before+1}.transferFcn = 'purelin';
net1.layerWeights{N_before+1, N_before+10}.learn = false;
if N_before > 0
    net1.layerWeights{N_before+1, N_before}.learn = false;
else
    net1.inputWeights{1,1}.learn = false;
end
%%
net1.biasConnect = [ones(1, N_before) 0 1 0 1 1 0 0 0 1 0 1 ones(1, N_after)]';
%% first gate
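% Forget amount: f(t) = logsig(Wf*z + bf). The Forget Gate layer then
% multiplies it elementwise (netprod) with the delayed memory c(t-1).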
net1.layers{N_before+2}.transferFcn = 'logsig';
net1.layerWeights{N_before+3, N_before+2}.weightFcn = 'scalprod';
% net1.layerWeights{3, 7}.weightFcn = 'scalprod';
net1.layerWeights{N_before+3, N_before+2}.learn = false;
net1.layerWeights{N_before+3, N_before+7}.learn = false;
net1.layers{N_before+3}.netinputFcn = 'netprod';
net1.layers{N_before+3}.transferFcn = 'purelin';
net1.LW{N_before+3, N_before+2} = 1;
% net1.LW{3, 7} = 1;
%% second gate
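% Remember amount (the input gate): i(t) = logsig(Wi*z + bi), fed directly
% from the concatenation layer.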
net1.layerConnect(N_before+4, N_before+1) = 1;
net1.layers{N_before+4}.transferFcn = 'logsig';
%% tanh
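% Candidate values: g(t) = tansig(Wg*z + bg), also fed straight from the
% concatenation layer (the default chain connection from the previous
% layer is removed).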
net1.layerConnect(N_before+5, N_before+4) = 0;
net1.layerConnect(N_before+5, N_before+1) = 1;
%% second gate mult
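% Remember Gate: the elementwise product i(t) .* g(t), built with netprod
% and fixed unit scalprod weights.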
net1.layerConnect(N_before+6, N_before+4) = 1;
net1.layers{N_before+6}.netinputFcn = 'netprod';
net1.layers{N_before+6}.transferFcn = 'purelin';
net1.layerWeights{N_before+6, N_before+5}.weightFcn = 'scalprod';
net1.layerWeights{N_before+6, N_before+4}.weightFcn = 'scalprod';
net1.layerWeights{N_before+6, N_before+5}.learn = false;
net1.layerWeights{N_before+6, N_before+4}.learn = false;
net1.LW{N_before+6, N_before+5} = 1;
net1.LW{N_before+6, N_before+4} = 1;
%% C update
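% Memory update: c(t) = f(t) .* c(t-1) + i(t) .* g(t). Layer N_before+7
% sums its two gated inputs, and c(t) is fed back to the Forget Gate with
% a one-step delay.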
delays_vec = 1;
net1.layerConnect(N_before+7, N_before+3) = 1;
net1.layerWeights{N_before+3, N_before+7}.delays = delays_vec;
net1.layerWeights{N_before+7, N_before+3}.weightFcn = 'scalprod';
net1.layerWeights{N_before+7, N_before+6}.weightFcn = 'scalprod';
net1.layers{N_before+7}.transferFcn = 'purelin';
net1.LW{N_before+7, N_before+3} = 1;
net1.LW{N_before+7, N_before+6} = 1;
net1.LW{N_before+3, N_before+7} = repmat(eye(hidden_size), [1, size(delays_vec, 2)]);
net1.layerWeights{N_before+3, N_before+7}.learn = false;
net1.layerWeights{N_before+7, N_before+6}.learn = false;
net1.layerWeights{N_before+7, N_before+3}.learn = false;
%% Output stage
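% Output stage: o(t) = logsig(Wo*z + bo) is the Combine Amount, and the
% Combine Gate computes h(t) = o(t) .* tansig(c(t)), which is also the
% signal fed back to the concatenation layer.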
net1.layerConnect(N_before+9, N_before+8) = 0;
net1.layerConnect(N_before+10, N_before+8) = 1;
net1.layerConnect(N_before+9, N_before+1) = 1;
net1.layerWeights{N_before+10, N_before+8}.weightFcn = 'scalprod';
net1.layerWeights{N_before+10, N_before+9}.weightFcn = 'scalprod';
net1.LW{N_before+10, N_before+9} = 1;
net1.LW{N_before+10, N_before+8} = 1;
net1.layers{N_before+10}.netinputFcn = 'netprod';
net1.layers{N_before+10}.transferFcn = 'purelin';
net1.layers{N_before+9}.transferFcn = 'logsig';
net1.layers{N_before+5}.transferFcn = 'tansig';
net1.layers{N_before+8}.transferFcn = 'tansig';
net1.layerWeights{N_before+10, N_before+9}.learn = false;
net1.layerWeights{N_before+10, N_before+8}.learn = false;
net1.layerWeights{N_before+7, N_before+3}.learn = false;
for ll = 1:N_before
    net1.layers{ll}.transferFcn = before_activation;
end
for ll = 1:N_after
    net1.layers{end-ll}.transferFcn = after_activations;
end

net1.layerWeights{N_before+8, N_before+7}.weightFcn = 'scalprod';
net1.LW{N_before+8, N_before+7} = 1;
net1.layerWeights{N_before+8, N_before+7}.learn = false;
%%
net1 = configure(net1, rand(real_input_size, 200), rand(output_size, 200));
net1.trainFcn = 'trainlm';
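
For example, you could call it like this (a minimal, untested sketch; the sizes and the random data are just placeholders):

net = create_LSTM_network(3, [], '', 5, [], '', 2); % 3 inputs, 5 memory units, 2 outputs, no extra layers
X = con2seq(rand(3, 100)); % 100 time steps of 3-dimensional input
T = con2seq(rand(2, 100)); % matching 2-dimensional targets
net = train(net, X, T);    % trains with trainlm through the delayed connections
Y = net(X);                % sequence of predictions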

Hope this helps…

Oshri