I did this to share the learning_rate between all neurons:

#include <memory>
#include <vector>

class neuron {
public:
  neuron(std::shared_ptr<const float> learning_rate)
      : learning_rate(std::move(learning_rate)) {}

private:
  // shares ownership of the network's learning rate
  const std::shared_ptr<const float> learning_rate;
};

class neural_network {
public:
  neural_network(float learning_rate = 0.005f)
      : learning_rate(std::make_shared<float>(learning_rate)) {}

  // handed out to every neuron so they all see the same value
  std::shared_ptr<float> learning_rate;

private:
  std::vector<neuron> neurons;
};

Is using the same learning_rate for all my neurons like this a good solution?
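For reference, the network would hand the shared pointer to each neuron roughly like this; the make_neuron helper is my own sketch, the question does not show this step:

  // inside neural_network:
  void make_neuron() {
    // every neuron copies the shared_ptr, so they all observe the same float
    neurons.push_back(neuron(learning_rate));
  }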

Best Answer

A shared_ptr is fairly expensive, and I don't see the need for it here; only the network needs to "own" the learning rate. Don't be afraid to use raw pointers where appropriate, just avoid new and delete:

#include <vector>

class neuron {
public:
    neuron(const float& learning_rate)
        : learning_rate(&learning_rate) {}

private:
    // non-owning pointer into the network that created this neuron
    const float* learning_rate;
};

class neural_network {
public:
    neural_network(float learning_rate = 0.005f)
        : learning_rate(learning_rate) {}

    float learning_rate;

    void make_neuron()
    {
        neurons.push_back(neuron(learning_rate));
    }

private:
    std::vector<neuron> neurons;
};
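As a rough usage sketch (the main function and the concrete values are my own illustration, not part of the answer): since each neuron stores a non-owning pointer into the network, updating the network's learning_rate is immediately visible to every neuron, with no reference counting involved. The one caveat is that the neurons must not outlive the network that owns the float.

#include <cstdio>

int main()
{
    neural_network net(0.01f);
    net.make_neuron();
    net.make_neuron();

    // All neurons see this change through their stored pointer.
    net.learning_rate = 0.001f;
    std::printf("learning rate is now %g\n", net.learning_rate);
}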
