-
Notifications
You must be signed in to change notification settings - Fork 90
/
Copy pathkeras_mlp.Rd
51 lines (43 loc) · 1.46 KB
/
keras_mlp.Rd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlp.R
\name{keras_mlp}
\alias{keras_mlp}
\title{Simple interface to MLP models via keras}
\usage{
keras_mlp(
x,
y,
hidden_units = 5,
penalty = 0,
dropout = 0,
epochs = 20,
activation = "softmax",
seeds = sample.int(10^5, size = 3),
...
)
}
\arguments{
\item{x}{A data frame or matrix of predictors}
\item{y}{A vector (factor or numeric) or matrix (numeric) of outcome data.}
\item{hidden_units}{An integer for the number of hidden units.}
\item{penalty}{A non-negative real number for the amount of weight decay. Either
this parameter \emph{or} \code{dropout} can be specified.}
\item{dropout}{The proportion of parameters to set to zero. Either
this parameter \emph{or} \code{penalty} can be specified.}
\item{epochs}{An integer for the number of passes through the data.}
\item{activation}{A character string for the type of activation function between layers.}
\item{seeds}{A vector of three positive integers to control randomness of the
calculations.}
\item{...}{Additional named arguments to pass to \code{keras::compile()} or
\code{keras::fit()}. Arguments will be sorted and passed to either function
internally.}
}
\value{
A \code{keras} model object.
}
\description{
Instead of building a \code{keras} model sequentially, \code{keras_mlp} can be used to
create a feedforward network with a single hidden layer. Regularization is
via either weight decay or dropout.
}
\keyword{internal}