Я использую PyBrain 0.3 (с python 2.7.9), и когда я тренирую свою сеть с BackpropTrainer, нет никаких изменений, это общая ошибка, даже после некоторых 10 - 20 итераций.Оценка ошибки PyBrain со временем не меняется при обучении сети
Любые предложения?
Edit:
path = "../pckld_ROIs/master_r19_7_5_emissivity_sub.pkl"
roi = RegionsOfInterest(path)
roi.set_aggregate(True)
net = ClassificationNet(roi, ['soil'], neigborhood_size=3)
net.neural_net.randomize()
net.set_trainer(learning_rate=0.1, verbose=True, momentum=0.1, weight_decay=0.01)
net.train_network(max_epochs=300, verbose=True)
RegionsOfInterest
Где это структура для проведения и структурирования точек данных.
def set_trainer(self, learning_rate=0.01, lrdecay=1.0,
momentum=0., verbose=False, batch_learning=False,
weight_decay=0.):
"""
Sets the trainer. If the data has been split, it uses the training data as data for the back-propagation
algorithm. If not, it uses the entire data set.
:param learning_rate: The rate in which the parameters are changed into the direction of the gradient.
:param lrdecay: How much the learning rate decreases per epoch. Multiplicative!
:param momentum: The weight of the previous time-step's gradient is affecting the next iteration.
:param verbose: Toggles verbose mode. Default is off.
:param batch_learning: Will be parameters be updated at the end of the epoch, or continuously? The default
is continuously.
:param weight_decay: How much the weighs are decreasing. 0 corresponds to no decrease.
:type learning_rate: float
:type lrdecay: float
:type momentum: float
:type verbose: bool
:type batch_learning: bool
:type weight_decay: float
:return: None
"""
if self.training_data is not None:
self.trainer = BackpropTrainer(self.neural_net, self.training_data,
learning_rate, lrdecay, momentum, verbose,
batch_learning, weight_decay)
else:
self.trainer = BackpropTrainer(self.neural_net, self.training_data,
learning_rate, lrdecay, momentum, verbose,
batch_learning, weight_decay)
def train_network(self,
max_epochs=-1,
verbose=False,
continue_epochs=10,
validation_proportion=0.25,
force_split=False):
"""
Trains the network until the error rate converges.
:param max_epochs: The maximum number of epochs the network is trained. The default is to not set a
maximum.
:param verbose: Toggles verbose mode or not. The default is not.
:param continue_epochs: How much longer the training should go on if we find a minimum in the error.
:param validation_proportion: The proportion that will be used for validation. The default is 25%, given that
the 'self.validation_data' has not been set.
:param force_split: If the 'self.validation_data' as been set, we can force another split on the
training data.
:type max_epochs: int
:type verbose: bool
:type continue_epochs: int
:type validation_proportion: float
:type force_split: bool
:return: None
"""
if max_epochs <= 0:
# We don't give it a stop criteria for time.
if self.validation_data is not None:
# We have already set aside some of the data for validation
if force_split:
# Screw that! I want the data to be split again!
self.trainer.trainUntilConvergence(self.training_data, None, verbose,
continue_epochs, validation_proportion)
else:
self.trainer.trainUntilConvergence(self.training_data, None, verbose, continue_epochs, 1)
else:
# We have no validation data set
self.trainer.trainUntilConvergence(self.data_set, None, verbose, continue_epochs, validation_proportion)
else:
# We have a stop criteria.
if self.validation_data is not None:
# We have already split the data into a validation set, and a training set.
if force_split:
# Screw that! I want the data to be split again
self.trainer.trainUntilConvergence(self.training_data, max_epochs, verbose,
continue_epochs, validation_proportion)
else:
self.trainer.trainUntilConvergence(self.training_data, max_epochs, verbose, continue_epochs, 1)
else:
# We do not have a validation data set.
self.trainer.trainUntilConvergence(self.data_set, max_epochs, verbose,
continue_epochs, validation_proportion)
Пожалуйста, внесите ваш код. – fracz