首页
学习
活动
专区
圈层
工具
发布
社区首页 >问答首页 >Deeplearning4j LSTM输出大小

Deeplearning4j LSTM输出大小
EN

Stack Overflow用户
提问于 2016-07-17 15:23:11
回答 3查看 955关注 0票数 2

在我的情况下——输入端是 List<List<Float>>(单词表示向量的列表);而输出端——从一个序列输出一个 Double。

所以我构建了下一个结构(第一个索引-示例编号,第二个句子项目编号,第三个单词向量元素编号):http://pastebin.com/KGdjwnki

在输出中:http://pastebin.com/fY8zrxEL

但是当我获取 model.output 的输出(http://pastebin.com/wvFFC4Hw)时——我得到的是向量 [0.25, 0.24, 0.25, 0.25],而不是一个值。

会是哪里出了问题呢?附上代码(Kotlin 编写),其中 classCount 为 1。

代码语言:javascript
复制
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork
import org.deeplearning4j.nn.conf.NeuralNetConfiguration.Builder
import org.deeplearning4j.nn.api.OptimizationAlgorithm
import org.deeplearning4j.nn.conf.Updater
import org.deeplearning4j.nn.weights.WeightInit
import org.deeplearning4j.nn.conf.layers.GravesLSTM
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer
import org.deeplearning4j.nn.conf.BackpropType
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.cpu.nativecpu.NDArray
import org.nd4j.linalg.indexing.NDArrayIndex
import org.nd4j.linalg.factory.Nd4j
import org.nd4j.linalg.lossfunctions.LossFunctions
import java.util.*

/**
 * Sequence classifier built on Deeplearning4j: three stacked GravesLSTM layers
 * feeding an RnnOutputLayer with softmax over [classCount] classes.
 *
 * Input examples are sentences represented as a list of word vectors of length
 * [wordVectorSize]; labels are one-hot arrays of length [classCount].
 */
class ClassifierNetwork(wordVectorSize: Int, classCount: Int) {
    // NOTE(review): using Array<Int> in a data class breaks structural
    // equals/hashCode (arrays compare by reference). List<Int> would be
    // preferable, but Array is kept to preserve the public interface.
    data class Dimension(val x: Array<Int>, val y: Array<Int>)

    /** The underlying DL4J network, configured in the init block. */
    val model: MultiLayerNetwork
    val optimization = OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT
    val iterations = 1
    val learningRate = 0.1
    val rmsDecay = 0.95
    val seed = 12345
    val l2 = 0.001
    val weightInit = WeightInit.XAVIER
    val updater = Updater.RMSPROP
    // NOTE(review): "backpropType" was almost certainly intended; the
    // misspelled name is kept so existing callers are not broken.
    val backtropType = BackpropType.TruncatedBPTT
    val tbpttLength = 50
    val epochs = 50

    // Set by train(): x = [exampleCount, wordVectorLength, sentenceLength],
    // y = [exampleCount, classCount].
    var dimensions = Dimension(intArrayOf(0).toTypedArray(), intArrayOf(0).toTypedArray())

    init {
        // NOTE(review): softmax + MCXENT produces a probability distribution
        // over classCount outputs (this is why a vector like [0.25, 0.24, ...]
        // comes back). For a single continuous output (regression) use the
        // "identity" activation with an MSE loss function instead.
        val baseConfiguration = Builder().optimizationAlgo(optimization)
                .iterations(iterations).learningRate(learningRate).rmsDecay(rmsDecay).seed(seed).regularization(true).l2(l2)
                .weightInit(weightInit).updater(updater)
                .list()
        baseConfiguration.layer(0, GravesLSTM.Builder().nIn(wordVectorSize).nOut(64).activation("tanh").build())
        baseConfiguration.layer(1, GravesLSTM.Builder().nIn(64).nOut(32).activation("tanh").build())
        baseConfiguration.layer(2, GravesLSTM.Builder().nIn(32).nOut(16).activation("tanh").build())
        baseConfiguration.layer(3, RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                .activation("softmax").weightInit(WeightInit.XAVIER).nIn(16).nOut(classCount).build())
        val cfg = baseConfiguration.build()!!
        cfg.backpropType = backtropType
        cfg.tbpttBackLength = tbpttLength
        cfg.tbpttFwdLength = tbpttLength
        cfg.isPretrain = false
        cfg.isBackprop = true
        model = MultiLayerNetwork(cfg)
    }

    /**
     * Derives tensor dimensions from the training data.
     *
     * Assumes all sentences share the length of x[0] and all labels the
     * length of y[0] — TODO confirm with callers; ragged input would produce
     * a malformed tensor in fitConversion().
     */
    private fun dataDimensions(x: List<List<Array<Double>>>, y: List<Array<Double>>): Dimension {
        assert(x.size == y.size)
        val exampleCount = x.size
        assert(x.isNotEmpty())
        val sentenceLength = x[0].size
        assert(sentenceLength > 0)
        val wordVectorLength = x[0][0].size
        assert(wordVectorLength > 0)
        val classCount = y[0].size
        assert(classCount > 0)
        return Dimension(
                intArrayOf(exampleCount, wordVectorLength, sentenceLength).toTypedArray(),
                intArrayOf(exampleCount, classCount).toTypedArray()
        )
    }

    /** A (features, labels) pair of ND4J tensors ready for model.fit(). */
    data class Fits(val x: INDArray, val y: INDArray)

    /**
     * Converts nested Kotlin lists into ND4J arrays: features become a rank-3
     * tensor [exampleCount, wordVectorLength, sentenceLength] (LSTMs require
     * rank-3 input), labels a rank-2 tensor [exampleCount, classCount].
     */
    private fun fitConversion(x: List<List<Array<Double>>>, y: List<Array<Double>>): Fits {
        val dim = dataDimensions(x, y)
        val xItems = ArrayList<INDArray>()
        for (i in 0 until dim.x[0]) {
            val itemList = ArrayList<DoubleArray>()
            for (j in 0 until dim.x[1]) {
                val rowList = ArrayList<Double>()
                for (k in 0 until dim.x[2]) {
                    // Transpose: source is [sentence pos][vector element],
                    // the tensor wants [vector element][sentence pos].
                    rowList.add(x[i][k][j])
                }
                itemList.add(rowList.toDoubleArray())
            }
            xItems.add(Nd4j.create(itemList.toTypedArray()))
        }
        val xFits = Nd4j.create(xItems, dim.x.toIntArray(), 'c')
        val yItems = ArrayList<DoubleArray>()
        for (i in 0 until y.size) {
            yItems.add(y[i].toDoubleArray())
        }
        val yFits = Nd4j.create(yItems.toTypedArray())
        return Fits(xFits, yFits)
    }

    /**
     * Prints the RMS distance between expected labels and predictions over
     * the whole training set after the given epoch.
     */
    private fun error(epoch: Int, x: List<List<Array<Double>>>, y: List<Array<Double>>) {
        var totalDiff = 0.0
        for (i in 0 until x.size) {
            val expected = y[i]
            val actual = predict(x[i])
            var diff = 0.0
            for (j in 0 until expected.size) {
                val elementDiff = expected[j] - actual[j]
                diff += Math.pow(elementDiff, 2.0)
            }
            diff = Math.sqrt(diff)
            totalDiff += Math.pow(diff, 2.0)
        }
        totalDiff = Math.sqrt(totalDiff)
        print("Epoch ")
        print(epoch)
        print(", diff ")
        println(totalDiff)
    }

    /**
     * Trains the network for [epochs] epochs, printing the training error
     * after each one. Must be called before predict() so [dimensions] is set.
     */
    fun train(x: List<List<Array<Double>>>, y: List<Array<Double>>) {
        dimensions = dataDimensions(x, y)
        val (xFit, yFit) = fitConversion(x, y)
        for (i in 0 until epochs) {
            model.input = xFit
            model.labels = yFit
            model.fit()
            error(i + 1, x, y)
        }
    }

    /**
     * Predicts the class distribution for a single sentence.
     *
     * @param x sentence as a list of word vectors (same shape as training data).
     * @return the class probability distribution at the final time step.
     */
    fun predict(x: List<Array<Double>>): Array<Double> {
        // Build a [wordVectorLength, sentenceLength] matrix for one example.
        val xList = ArrayList<DoubleArray>()
        for (i in 0 until dimensions.x[1]) {
            val row = ArrayList<Double>()
            for (j in 0 until dimensions.x[2]) {
                row.add(x[j][i])
            }
            xList.add(row.toDoubleArray())
        }
        val xItem = Nd4j.create(xList.toTypedArray())
        val y = model.output(xItem)
        // BUG FIX: the original discarded `y` and returned an empty array,
        // which made error() throw IndexOutOfBoundsException on realResult[j].
        // An RnnOutputLayer emits one distribution per time step; for
        // whole-sequence classification read the LAST time step.
        // Assumes the output shape is [classCount, sentenceLength] — TODO
        // confirm against the DL4J version in use.
        val lastStep = y.columns() - 1
        val result = ArrayList<Double>()
        for (i in 0 until dimensions.y[1]) {
            result.add(y.getDouble(i, lastStep))
        }
        return result.toTypedArray()
    }
}

upd。似乎下一个示例有"near“任务,所以稍后我将检查它并发布解决方案:https://github.com/deeplearning4j/dl4j-0.4-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/word2vecsentiment/Word2VecSentimentRNN.java

EN

回答 3

Stack Overflow用户

发布于 2016-07-25 20:10:52

LSTM输入/输出只能是秩3:请参阅:http://deeplearning4j.org/usingrnns

票数 1
EN

Stack Overflow用户

发布于 2016-08-09 15:51:48

除了在非常活跃的gitter中发布这篇文章的建议,以及Adam的提示来查看伟大的文档之外,我还想指出代码中的其他一些事情,因为我正在努力解决类似的问题:

查看 examples/recurrent/basic/BasicRNNExample.java 中的基本示例,这里您可以看到 RNN 输出的用法;由于类数为 1,您似乎是在执行回归,为此还可以查看 examples/feedforward/regression/RegressionSum.java

  • 在此处的回归示例中您可以看到,作为激活函数,您应该使用 "identity"(恒等激活)。"softmax" 实际上会将输出归一化为总和为 1(请参见 glossary),因此如果您只有一个输出,它将始终输出 1(至少对于我的问题是这样)。
票数 1
EN

Stack Overflow用户

发布于 2017-12-13 13:20:13

不确定我是否正确理解了您的要求,但如果您想要单输出(即预测一个数字或回归),您通常会使用身份激活和MSE损失函数。您已经使用了softmax,它通常用于分类。

票数 0
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/38418859

复制
相关文章

相似问题

领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档