我正在尝试用tensorflow.js来建立和训练一个lstm网络,我的数据集就像
输入:“我不喜欢这些鞋,它们对我来说太黄了。所以把它们还给我。”预期输出原因:“颜色”
我可以使用预先训练的word2vec模型将文本表示为向量。我试着阅读了tensorflow.js文档,但是我无法理解它,即使是从其他来源也找不到一个关于如何在tensorflow.js中实现和训练网络的好例子。
有谁能给出一个用node.js和tensorflow.js直接创建LSTM层并训练模型的例子吗?
发布于 2018-09-28 07:01:44
下面是一个实现Word2Vec的具有多层LSTM的RNN示例。我认为您不需要做太多调整就能匹配您的用例——可能只需要修改数据源和超参数。当然，您也需要编写自己的测试函数。由于您说您可以提供您自己的预先训练的Word2Vec向量，如果您愿意的话，您可能还可以删除相当大一部分代码。
我应该注意——这个例子是我写的，我的首选是使用一个名为 numjs 的库来创建向量（它是一个类似于 NumPy 的库），然后将它们转换为 TensorFlow.js 使用的张量。这并不是 TensorFlow.js 被设计的使用方式，但是我发现他们创建张量的方法太麻烦了，而且不直观。
总之,这是我的例子(使用node.js)。我希望你发现它有用。
// Registers the native (C++) TensorFlow backend for Node.js; required for
// its side effect only, so the return value is deliberately discarded.
require('@tensorflow/tfjs-node')
const tf = require('@tensorflow/tfjs');
const fs = require('fs');
var nj = require('numjs');
// Window size (words per training sample) and stride between consecutive
// windows when slicing the corpus into sequences.
const maxlen = 30;
const step = 3;
// NOTE(review): <file> is a placeholder — substitute the path to your
// training corpus before running; this line does not parse as written.
fs.readFile(<file>, 'utf8', function (error, data) {
if (error) throw error;
var text = data.toString();
create_model(text)
});
/**
 * Array.prototype.filter predicate that keeps only the first occurrence of
 * each value, i.e. deduplicates the array while preserving order.
 *
 * @param {*} value - current element
 * @param {number} index - current position in the array
 * @param {Array} self - the array being filtered
 * @returns {boolean} true iff this is the element's first occurrence
 */
function onlyUnique(value, index, self) {
  const firstSeenAt = self.indexOf(value);
  return firstSeenAt === index;
}
/**
 * Returns the index of the largest element of `arr` (the first one on
 * ties), or -1 for an empty array.
 *
 * @param {number[]} arr - values to scan
 * @returns {number} index of the maximum, or -1 if `arr` is empty
 */
function indexOfMax(arr) {
  let best = -1;
  arr.forEach((value, i) => {
    // Strict > keeps the earliest index when values are equal.
    if (best === -1 || value > arr[best]) {
      best = i;
    }
  });
  return best;
}
/**
 * Applies temperature scaling and a softmax re-normalization to a vector of
 * prediction scores, then returns the index of the most likely entry.
 *
 * Fixes vs. the original: `exp_preds` and `arr` were implicit globals, and
 * the numjs round-trip was unnecessary for this simple 1-D computation —
 * plain array methods do the same work with no extra dependency.
 *
 * @param {number[]} preds - non-negative prediction scores (e.g. softmax output)
 * @param {number} temperature - > 0; lower values sharpen the distribution
 * @returns {number} index of the maximum re-weighted probability, or -1 if empty
 */
function sample(preds, temperature) {
  const logits = preds.map((p) => Math.log(p) / temperature);
  const expPreds = logits.map((v) => Math.exp(v));
  const total = expPreds.reduce((a, b) => a + b, 0);
  const probs = expPreds.map((v) => v / total);
  // Inline argmax (first index wins on ties), matching indexOfMax's contract.
  let maxIndex = -1;
  let maxValue = -Infinity;
  probs.forEach((p, i) => {
    if (p > maxValue) {
      maxValue = p;
      maxIndex = i;
    }
  });
  return maxIndex;
}
/**
 * Builds and trains a word-level next-word LSTM language model on the
 * given corpus.
 *
 * Pipeline: lowercase the text, derive a sorted unique-word vocabulary,
 * slide a window of `maxlen` words over the corpus with stride `step`
 * (the word following each window is the training target), one-hot encode
 * windows and targets, then fit a 2-layer LSTM with dropout.
 *
 * Fixes vs. the original: `tf.layers.dropout` takes a config object, not a
 * bare number; `list_words`, `x_tensor`, `y_tensor` were implicit globals;
 * training tensors use 'float32' instead of 'bool'; the unused `sentences1`
 * array was removed; the function's missing closing brace was restored.
 *
 * @param {string} text - raw training corpus
 */
async function create_model(text) {
  /* data prep */
  text = text.toLowerCase()
  console.log('corpus length:', text.length)

  // Vocabulary: unique, sorted, non-empty tokens.
  var words = text.replace(/(\r\n\t|\n|\r\t)/gm," ").split(" ")
  words = words.filter(onlyUnique)
  words = words.sort()
  words = words.filter(String)
  console.log("total number of unique words" + words.length)

  // Bidirectional word <-> index lookup tables.
  var word_indices = {}
  var indices_word = {}
  for (const [idx, word] of words.entries()) {
    word_indices[word] = idx
    indices_word[idx] = word
  }

  console.log("maxlen: " + maxlen, " step: " + step)

  // Slice the corpus into overlapping `maxlen`-word windows; the word
  // immediately after each window is the prediction target.
  var sentences = []
  var next_words = []
  const list_words = text.toLowerCase().replace(/(\r\n\t|\n|\r\t)/gm," ").split(" ").filter(String)
  console.log('list_words ' + list_words.length)
  for (var i = 0; i < (list_words.length - maxlen); i += step) {
    sentences.push(list_words.slice(i, i + maxlen).join(" "))
    next_words.push(list_words[i + maxlen])
  }
  console.log('nb sequences(length of sentences):', sentences.length)
  console.log("length of next_word", next_words.length)

  // One-hot encode: X[sample][timestep][wordIndex] and y[sample][wordIndex].
  console.log('Vectorization...')
  var X = nj.zeros([sentences.length, maxlen, words.length])
  console.log('X shape' + X.shape)
  var y = nj.zeros([sentences.length, words.length])
  console.log('y shape' + y.shape)
  for (const [si, sentence] of sentences.entries()) {
    for (const [t, word] of sentence.split(" ").entries()) {
      X.set(si, t, word_indices[word], 1)
    }
    y.set(si, word_indices[next_words[si]], 1)
  }

  console.log('Creating model... Please wait.');
  console.log("MAXLEN " + maxlen + ", words.length " + words.length)
  const model = tf.sequential();
  model.add(tf.layers.lstm({
    units: 128,
    returnSequences: true,            // feed the full sequence to the next LSTM
    inputShape: [maxlen, words.length]
  }));
  // Fixed: tf.layers.dropout expects a config object ({rate}), not a number.
  model.add(tf.layers.dropout({ rate: 0.2 }))
  model.add(tf.layers.lstm({
    units: 128,
    returnSequences: false            // final LSTM emits a single vector
  }));
  model.add(tf.layers.dropout({ rate: 0.2 }))
  model.add(tf.layers.dense({units: words.length, activation: 'softmax'}));
  model.compile({loss: 'categoricalCrossentropy', optimizer: tf.train.rmsprop(0.002)});

  // Fixed: 'float32' (not 'bool') so the one-hot tensors feed the LSTM and
  // the cross-entropy loss directly; also declared instead of leaking globals.
  const x_tensor = tf.tensor3d(X.tolist(), null, 'float32')
  const y_tensor = tf.tensor2d(y.tolist(), null, 'float32')

  /* training */
  await model.fit(x_tensor, y_tensor, {
    epochs: 100,
    batchSize: 32,
    callbacks: {
      onEpochEnd: async (epoch, logs) => {
        console.log(logs.loss + ",")
      }
    }
  })
  // Source: https://stackoverflow.com/questions/52546444
}
复制相似问题