# Gradient descent demonstration for
# f(x) = x^2 * (x + 3) * (x - 4) * (x - 7)^3
# Analytic gradient of f(): the factored form of f'(x).
#
# @x - numeric vector of points at which to evaluate the gradient
grad <- function(x) {
  cubic <- 7 * x^3 - 34 * x^2 - 39 * x + 168
  (x - 7)^2 * x * cubic
}
|
|
|
|
startPoint <- 3  # Default starting point used by grad.descent()
|
|
|
|
# Objective function: a degree-7 polynomial with roots at
# x = -3, x = 0 (double), x = 4 and x = 7 (triple).
#
# @x - numeric vector of evaluation points
f <- function(x) {
  out <- x^2
  out <- out * (x + 3)
  out <- out * (x - 4)
  out * (x - 7)^3
}
|
|
|
|
# Minimize f() by gradient descent.
#
# @x0 - starting point
# @epsilon - maximum error: iteration stops once the squared change in
#            f between consecutive iterates falls below this value
# @alpha - learning rate (step size)
# @i.max - maximum number of iterations
#
# Returns list(final point, path of accepted points, iterations used,
# per-iteration loss values).
# Side effects: prints the loss every iteration and draws each trial
# point on the current plot, so a plot device must already be open.
grad.descent <- function(x0 = startPoint,
                         epsilon = 0.01,
                         alpha = 0.00001,
                         i.max = 1e6){
  gradient <- grad(x0)    # Gradient at the starting point
  x.path <- x0            # Path of accepted points
  loss <- numeric(i.max)  # Preallocated: append() in a loop is O(n^2)
  for (i in seq_len(i.max)){
    x.new <- x0 - alpha * gradient  # Gradient descent update
    gradient <- grad(x.new)         # Gradient in new point
    points(x = x.new, y = f(x.new), pch = 20, col = 'green', cex = 0.5)
    currentLoss <- (f(x0) - f(x.new))^2  # Squared change in f
    print(currentLoss)
    loss[i] <- currentLoss
    if (currentLoss < epsilon){ # STOP: converged
      break
    }
    x0 <- x.new
    # NOTE(review): rbind() in a loop is also O(n^2); kept as-is to
    # preserve the exact matrix/rownames shape of the returned path.
    x.path <- rbind(x.path, x.new)
  }
  # Trim the preallocated loss vector to the iterations actually run.
  return(list(x.new, x.path, i, loss[seq_len(i)]))
}
|
|
|
|
# Sample the function and its gradient over the plotting range.
x <- seq(-3, 8.5, by=0.1)
y <- f(x)
g <- grad(x)

# Plot f (black line), its gradient (yellow) and the zero line (gray).
# NOTE(review): a dangling incomplete assignment `zero <-` previously sat
# here and silently captured plot()'s invisible NULL; removed as dead code.
plot(x, y, type="l", ylim = c(-15000, 30000))
lines(x, g, col="yellow")
abline(h = 0, col="gray")
|
|
|
|
# Run the descent from the default start point and report the result.
result <- grad.descent()

round(f(result[[1]][1]), 3) # Function value at the point found
round(result[[1]], 2) # The point found

# Mark the start (red) and the final point (blue) on the function plot.
points(x = startPoint, y = f(startPoint), pch = 20, col = 'red', cex = 2) # Starting point
points(x = result[[1]], y = f(result[[1]]), pch = 20, col = 'blue', cex = 2)

# Convergence diagnostic: loss value at each iteration.
plot(result[[4]], type="l")
|
|