-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsvm.py
executable file
·68 lines (54 loc) · 1.83 KB
/
svm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# -*-coding: utf-8 -*-
"""
Created on April 8, 2016
"""
import numpy
from six import add_metaclass
from zope.interface import implementer, Interface
from mapped_object_registry import MappedObjectsRegistry
class IModel(Interface):
    """zope interface contract for models that expose summed gradients
    of a loss w.r.t. their weights and bias (used by a gradient-descent
    trainer elsewhere in the project — TODO confirm caller).
    """
    def get_sum_grad_weights(self, loss_grad):
        """
        Return sum(dE/dy * dy/dw): the loss gradient w.r.t. the model
        weights, accumulated over the minibatch.

        :param loss_grad: dE/dy, partial derivative of the loss on the
            model output y, one value per minibatch sample
        :return: accumulated gradient w.r.t. the weights
        """
    def get_sum_grad_bias(self, loss_grad):
        """
        Return sum(dE/dy * dy/db): the loss gradient w.r.t. the model
        bias, accumulated over the minibatch.

        :param loss_grad: dE/dy, partial derivative of the loss on the
            model output y, one value per minibatch sample
        :return: accumulated gradient w.r.t. the bias
        """
class ModelsRegistry(MappedObjectsRegistry):
    """Metaclass registry: classes created with this metaclass are
    recorded under the "models" mapping of MappedObjectsRegistry
    (presumably keyed by their MAPPING attribute — TODO confirm in
    mapped_object_registry).
    """
    mapping = "models"
# six.add_metaclass keeps the metaclass declaration compatible with
# both Python 2 and Python 3 syntax.
@add_metaclass(ModelsRegistry)
class BaseModel(object):
    """Common base for all models; subclassing it auto-registers the
    subclass in ModelsRegistry via the metaclass.
    """
    pass
@implementer(IModel)
class LinearSVM(BaseModel):
    """Linear SVM model y = w*x - b.

    Provides the summed loss gradients w.r.t. weights and bias needed
    by a gradient-descent trainer. Registered in ModelsRegistry under
    the "linear_svm" key.
    """
    MAPPING = "linear_svm"

    def __init__(self, n_features, minibatch_size, minibatch):
        """
        :param n_features: number of features per input sample
        :param minibatch_size: number of samples in one minibatch
        :param minibatch: input samples x; assumed to be an array of
            shape (minibatch_size, n_features) — TODO confirm with caller
        """
        self.n_features = n_features
        self.minibatch_size = minibatch_size
        self.minibatch = minibatch

    def get_sum_grad_weights(self, loss_grad):
        """
        Calculates sum(dE/dy * dy/dw), where E(y) - loss function,
        dE/dy - loss function partial derivative on y,
        y = wx - b - linear svm model,
        dy/dw - partial derivative on weights of linear svm model and
        it equals x, i.e. self.minibatch

        :param loss_grad: dE/dy, loss function partial derivative on y
        :return: sum(dE/dy * dy/dw), computed as x^T . dE/dy
        """
        grad_weights = self.minibatch
        return numpy.dot(numpy.transpose(grad_weights), loss_grad)

    def get_sum_grad_bias(self, loss_grad):
        """
        Calculates sum(dE/dy * dy/db), where E(y) - loss function,
        dE/dy - loss function partial derivative on y,
        y = wx - b - linear svm model,
        dy/db - partial derivative on bias of linear svm model and
        it equals -1

        :param loss_grad: dE/dy, loss function partial derivative on y
        :return: sum(dE/dy * dy/db), i.e. -sum(loss_grad), as a scalar
        """
        # numpy.sum reduces the whole array in C and yields the same
        # scalar as the original `- sum(loss_grad)[0]` for a (n, 1)
        # column vector, while also accepting a 1-D loss_grad (the
        # builtin sum + [0] indexing required a 2-D input).
        return -numpy.sum(loss_grad)