# Загрузка данных


import numpy as np

# =========================================================
# ВАРИАНТ №2
# f(x1, x2) = 2(x1 - 4)^2 + (x2 - 6)^2
# x0 = (0, 0)
# =========================================================

# -----------------------------
# Целевая функция
# -----------------------------
def f(x):
    """Objective function f(x1, x2) = 2*(x1 - 4)^2 + (x2 - 6)^2.

    The minimum value 0 is attained at (4, 6).
    """
    first, second = x
    quad1 = (first - 4) ** 2
    quad2 = (second - 6) ** 2
    return 2 * quad1 + quad2


# -----------------------------
# Градиент функции
# ∇f = (4(x1-4), 2(x2-6))
# -----------------------------
def grad_f(x):
    """Analytic gradient of f: ∇f = (4*(x1 - 4), 2*(x2 - 6))."""
    first, second = x
    partial_x1 = 4 * (first - 4)
    partial_x2 = 2 * (second - 6)
    return np.array([partial_x1, partial_x2])


# -----------------------------
# Гессиан функции
# -----------------------------
def hessian_f():
    """Constant Hessian of the quadratic f: diag(4, 2)."""
    hessian = np.diag([4, 2])
    return hessian


# =========================================================
# 1. ГРАДИЕНТНЫЙ СПУСК
#    С ПОСТОЯННЫМ ШАГОМ
# =========================================================
def gradient_descent_const(
        x0,
        alpha=0.1,
        eps=1e-4,
        max_iter=100):
    """Gradient descent with a fixed step size.

    Iterates x <- x - alpha * grad_f(x) until ||grad|| < eps or
    max_iter iterations pass, printing a trace line each iteration.
    Returns the final point as an np.ndarray.
    """
    point = np.array(x0, dtype=float)

    print("\n=== Градиентный спуск (постоянный шаг) ===")

    for step in range(max_iter):
        gradient = grad_f(point)
        gradient_norm = np.linalg.norm(gradient)

        print(f"{step:3d} | x = {point} | f(x) = {f(point):.6f} | ||g|| = {gradient_norm:.6f}")

        # Stop once the gradient is small enough.
        if gradient_norm < eps:
            break

        point = point - alpha * gradient

    print("\nМинимум:", point)
    print("f(x*) =", f(point))

    return point


# =========================================================
# 2. МЕТОД НАИСКОРЕЙШЕГО СПУСКА
#    (оптимальный шаг)
# =========================================================
def steepest_descent(
        x0,
        eps=1e-4,
        max_iter=100):
    """Steepest descent with the exact optimal step for a quadratic.

    For a quadratic objective the exact line-search step along -g is
    alpha = (g^T g) / (g^T H g), where H is the (constant) Hessian.
    Returns the final point as an np.ndarray.
    """
    point = np.array(x0, dtype=float)

    print("\n=== Метод наискорейшего спуска ===")

    hessian = hessian_f()

    for step in range(max_iter):
        gradient = grad_f(point)
        gradient_norm = np.linalg.norm(gradient)

        print(f"{step:3d} | x = {point} | f(x) = {f(point):.6f} | ||g|| = {gradient_norm:.6f}")

        if gradient_norm < eps:
            break

        # Optimal step length along the negative gradient direction.
        numerator = gradient @ gradient
        denominator = gradient @ hessian @ gradient
        point = point - (numerator / denominator) * gradient

    print("\nМинимум:", point)
    print("f(x*) =", f(point))

    return point


# =========================================================
# 3. ГРАДИЕНТНЫЙ СПУСК
#    С ПРАВИЛОМ АРМИХО
# =========================================================
def gradient_descent_armijo(
        x0,
        alpha0=1.0,
        rho=0.5,
        c=0.5,
        eps=1e-4,
        max_iter=100):
    """Gradient descent with Armijo backtracking line search.

    Starting from alpha0, the step is shrunk by factor rho until the
    sufficient-decrease condition
        f(x - alpha*g) <= f(x) - c*alpha*||g||^2
    holds. Returns the final point as an np.ndarray.
    """
    x = np.array(x0, dtype=float)

    print("\n=== Градиентный спуск (Армихо) ===")

    for k in range(max_iter):
        g = grad_f(x)
        g_norm = np.linalg.norm(g)

        print(f"{k:3d} | x = {x} | f(x) = {f(x):.6f} | ||g|| = {g_norm:.6f}")

        if g_norm < eps:
            break

        # Hoist loop invariants: f(x) and ||g||^2 do not change while
        # backtracking, so compute them once instead of on every trial.
        fx = f(x)
        g_sq = g @ g

        alpha = alpha0

        # Armijo rule: shrink the step until sufficient decrease holds.
        while f(x - alpha * g) > fx - c * alpha * g_sq:
            alpha *= rho
            # Safety guard: stop shrinking near the limits of float
            # precision so the loop cannot spin forever.
            if alpha < 1e-16:
                break

        x = x - alpha * g

    print("\nМинимум:", x)
    print("f(x*) =", f(x))

    return x


# =========================================================
# 4. МЕТОД НЬЮТОНА
# =========================================================
def newton_method(
        x0,
        eps=1e-4,
        max_iter=100):
    """Newton's method: x <- x - H^{-1} g.

    The Hessian is constant, so its inverse is computed once up front.
    For this quadratic objective a single Newton step reaches the
    minimum. Returns the final point as an np.ndarray.
    """
    point = np.array(x0, dtype=float)

    print("\n=== Метод Ньютона ===")

    inverse_hessian = np.linalg.inv(hessian_f())

    for step in range(max_iter):
        gradient = grad_f(point)
        gradient_norm = np.linalg.norm(gradient)

        print(f"{step:3d} | x = {point} | f(x) = {f(point):.6f} | ||g|| = {gradient_norm:.6f}")

        if gradient_norm < eps:
            break

        point = point - inverse_hessian @ gradient

    print("\nМинимум:", point)
    print("f(x*) =", f(point))

    return point


# =========================================================
# ГЛАВНАЯ ПРОГРАММА
# =========================================================
if __name__ == "__main__":
    # Common starting point for every method.
    start = [0, 0]

    # Run all four methods in sequence:
    # constant-step gradient descent, steepest descent,
    # Armijo backtracking, and Newton's method.
    gradient_descent_const(start, alpha=0.1)
    steepest_descent(start)
    gradient_descent_armijo(start)
    newton_method(start)