Shark machine learning library
VersatileClassificationTutorial-Network.cpp
#include &lt;shark/Data/Dataset.h&gt;
#include &lt;shark/Data/Csv.h&gt;
#include &lt;shark/ObjectiveFunctions/Loss/ZeroOneLoss.h&gt;

#include &lt;shark/Models/FFNet.h&gt;
#include &lt;shark/ObjectiveFunctions/Loss/CrossEntropy.h&gt;
#include &lt;shark/ObjectiveFunctions/ErrorFunction.h&gt;
#include &lt;shark/Algorithms/GradientDescent/Rprop.h&gt;
#include &lt;shark/Algorithms/StoppingCriteria/MaxIterations.h&gt;
#include &lt;shark/Algorithms/Trainers/OptimizationTrainer.h&gt;

using namespace shark;

int main()
{
	// Load data, use 70% for training and 30% for testing.
	// The path is hard coded; make sure to invoke the executable
	// from a place where the data file can be found. It is located
	// under [shark]/examples/Supervised/data.
	ClassificationDataset traindata, testdata;
	importCSV(traindata, "data/quickstartData.csv", LAST_COLUMN, ' ');
	testdata = splitAtElement(traindata, 70 * traindata.numberOfElements() / 100);

	typedef FFNet<LogisticNeuron, LogisticNeuron> ModelType;  // sigmoid transfer function for hidden and output neurons
	ModelType model;
	size_t N = inputDimension(traindata);
	size_t M = 10;
	model.setStructure(N, M, 2);           // N inputs (depends on the data),
	                                       // M hidden neurons (depends on problem difficulty),
	                                       // and two output neurons (two classes).
	initRandomUniform(model, -0.1, 0.1);   // initialize with small random weights
	CrossEntropy trainloss;                // differentiable loss for neural network training
	IRpropPlus optimizer;                  // gradient-based optimization algorithm
	MaxIterations<> stop(100);             // stop optimization after 100 Rprop steps
	OptimizationTrainer<ModelType, unsigned int> trainer(&trainloss, &optimizer, &stop);

	trainer.train(model, traindata);

	Data<RealVector> prediction = model(testdata.inputs());

	ZeroOneLoss<unsigned int, RealVector> loss;
	double error_rate = loss(testdata.labels(), prediction);

	std::cout << "model: " << model.name() << std::endl
	          << "trainer: " << trainer.name() << std::endl
	          << "test error rate: " << error_rate << std::endl;
}
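
If one also wants to see how closely the network fits the data it was trained on, the same zero-one loss can be evaluated on the training set and compared against the test error. The following is a minimal sketch, reusing only calls that already appear in the example above; it would go just before the final output statement.

	// Sketch: evaluate the training error with the same zero-one loss,
	// so it can be compared against the test error reported above.
	Data<RealVector> trainPrediction = model(traindata.inputs());
	double train_error_rate = loss(traindata.labels(), trainPrediction);
	std::cout << "training error rate: " << train_error_rate << std::endl;

A training error that is much lower than the test error would indicate overfitting, which could be addressed by reducing the number of hidden neurons M or the number of Rprop iterations.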