I am trying to implement an AI for 2048 using MiniMax with Alpha-Beta pruning, based on the snake strategy (see this paper) as a single heuristic, which seems to be the best one.
Unfortunately, the AI only reaches 256 in most games, which is not much better than a plain empty-cells heuristic. I have read the related threads here, but I could not work out a fix on my own.
Here is the code:

import math
from BaseAI_3 import BaseAI

INF_P = math.inf

class PlayerAI(BaseAI):
    move_str = {
        0: "UP",
        1: "DOWN",
        2: "LEFT",
        3: "RIGHT"
    }

    def __init__(self):
        super().__init__()
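        # fixed lookahead depth (in plies) for the minimax search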
        self.depth_max = 4

    def getMove(self, grid):
        # decision() already returns the chosen move direction; fall back to
        # any available move if the search did not pick one
        move_direction, state, utility = self.decision(grid)
        if move_direction is not None:
            return move_direction
        moves = grid.getAvailableMoves()
        return moves[0] if moves else None

    def get_children(self, grid):
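        # children of a max node: the states reachable by each available player move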
        grid.children = []
        for move_direction in grid.getAvailableMoves():
            gridCopy = grid.clone()
            gridCopy.path = grid.path[:]
            gridCopy.path.append(PlayerAI.move_str[move_direction])
            gridCopy.move(move_direction)
            gridCopy.depth_current = grid.depth_current + 1
            grid.children.append((move_direction, gridCopy))
        return grid.children

    def utility(self, state):

        def snake():
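            # two base snake-shaped weight matrices: a row-wise and a column-wise zig-zag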
            poses = [
                [
                    [2 ** 15, 2 ** 14, 2 ** 13, 2 ** 12],
                    [2 ** 8, 2 ** 9, 2 ** 10, 2 ** 11],
                    [2 ** 7, 2 ** 6, 2 ** 5, 2 ** 4],
                    [2 ** 0, 2 ** 1, 2 ** 2, 2 ** 3]
                ]
                ,
                [
                   [2 ** 15, 2 ** 8, 2 ** 7, 2 ** 0],
                   [2 ** 14, 2 ** 9, 2 ** 6, 2 ** 1],
                   [2 ** 13, 2 ** 10, 2 ** 5, 2 ** 2],
                   [2 ** 12, 2 ** 11, 2 ** 4, 2 ** 3]
                ]
            ]

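            # append the flipped and rotated variants of each base pattern so
            # every orientation of the snake is covered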
            poses.append([item for item in reversed(poses[0])])
            poses.append([list(reversed(item)) for item in reversed(poses[0])])
            poses.append([list(reversed(item)) for item in poses[0]])

            poses.append([item for item in reversed(poses[1])])
            poses.append([list(reversed(item)) for item in reversed(poses[1])])
            poses.append([list(reversed(item)) for item in poses[1]])

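            # take the best weighted sum of the board over all orientations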
            max_value = -INF_P
            for pos in poses:
                value = 0
                for i in range(state.size):
                    for j in range(state.size):
                        value += state.map[i][j] * pos[i][j]

                if value > max_value:
                    max_value = value

            return max_value

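        # scale the raw snake score down; the utility is returned as a 1-tuple
        # so the search can sum() several heuristic components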
        weight_snake = 1 / (2 ** 13)

        value = (
            weight_snake * snake(),
        )

        return value

    def decision(self, state):
        state.depth_current = 1
        state.path = []
        return self.maximize(state, -INF_P, INF_P)

    def terminal_state(self, state):
        return state.depth_current >= self.depth_max

    def maximize(self, state, alpha, beta):
        # terminal-state check
        if self.terminal_state(state):
            return (None, state, self.utility(state))

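        # utilities are tuples of heuristic components, so compare and prune on their sums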
        max_move_direction, max_child, max_utility = None, None, (-INF_P, )
        for move_direction, child in self.get_children(state):
            _, state2, utility = self.minimize(child, alpha, beta)
            child.utility = utility

            if sum(utility) > sum(max_utility):
                max_move_direction, max_child, max_utility = move_direction, child, utility

            if sum(max_utility) >= beta:
                break

            if sum(max_utility) > alpha:
                alpha = sum(max_utility)

        state.utility = max_utility
        state.alpha = alpha
        state.beta = beta

        return max_move_direction, max_child, max_utility

    def minimize(self, state, alpha, beta):
        # terminal-state check
        if self.terminal_state(state):
            return (None, state, self.utility(state))

        min_move_direction, min_child, min_utility = None, None, (INF_P, )
        for move_direction, child in self.get_children(state):
            _, state2, utility = self.maximize(child, alpha, beta)
            child.utility = utility

            if sum(utility) < sum(min_utility):
                min_move_direction, min_child, min_utility = move_direction, child, utility

            if sum(min_utility) <= alpha:
                break

            if sum(min_utility) < beta:
                beta = sum(min_utility)

        state.utility = min_utility
        state.alpha = alpha
        state.beta = beta

        return min_move_direction, min_child, min_utility

grid is an object, and grid.map is a two-dimensional array (a list of lists).
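For reference, a minimal stand-in for grid that implements only what the code above uses (map, size, clone(), getAvailableMoves(), move()) could look like the sketch below; this is a hypothetical test double, not the game framework's actual grid class:

import copy

class Grid:
    # hypothetical minimal grid: 0 means an empty cell, otherwise the tile value
    def __init__(self, size=4):
        self.size = size
        self.map = [[0] * size for _ in range(size)]

    def clone(self):
        g = Grid(self.size)
        g.map = copy.deepcopy(self.map)
        return g

    def _slide_left(self, line):
        # slide and merge one row/column towards index 0
        tiles = [t for t in line if t]
        merged, i = [], 0
        while i < len(tiles):
            if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
                merged.append(tiles[i] * 2)
                i += 2
            else:
                merged.append(tiles[i])
                i += 1
        return merged + [0] * (self.size - len(merged))

    def move(self, direction):
        # 0: UP, 1: DOWN, 2: LEFT, 3: RIGHT; returns True if the board changed
        before = [row[:] for row in self.map]
        if direction in (2, 3):
            for i in range(self.size):
                row = self.map[i] if direction == 2 else list(reversed(self.map[i]))
                row = self._slide_left(row)
                self.map[i] = row if direction == 2 else list(reversed(row))
        else:
            for j in range(self.size):
                col = [self.map[i][j] for i in range(self.size)]
                col = col if direction == 0 else list(reversed(col))
                col = self._slide_left(col)
                col = col if direction == 0 else list(reversed(col))
                for i in range(self.size):
                    self.map[i][j] = col[i]
        return self.map != before

    def getAvailableMoves(self):
        # directions whose move() would actually change the board
        return [d for d in range(4) if self.clone().move(d)]

# quick smoke test with the PlayerAI above
ai = PlayerAI()
grid = Grid()
grid.map[0][0] = grid.map[0][1] = 2
print(PlayerAI.move_str[ai.getMove(grid)])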
Am I making a mistake somewhere? How can I improve the code?
I have added a game log: https://pastebin.com/eyzgU2dN

Best answer

Over the past weekend I realized that the algorithm had not been implemented correctly. There was a bug in the minimize() function: I was generating the children the wrong way. It should be like this:

def get_opponent_children(self, grid):
    grid.children = []
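    # the "opponent" move is a tile spawn: try placing a 2 and a 4 in every empty cell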
    for x in range(grid.size):
        for y in range(grid.size):
            if grid.map[x][y] == 0:
                for c in (2, 4):
                    gridCopy = grid.clone()
                    gridCopy.path = grid.path[:]
                    gridCopy.depth_current = grid.depth_current + 1
                    gridCopy.map[x][y] = c
                    grid.children.append((None, gridCopy))

    return grid.children

And the corresponding change in minimize():
for move_direction, child in self.get_opponent_children(state):
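
For context, this is how minimize() reads after that single change; everything else stays exactly as in the question's code:

def minimize(self, state, alpha, beta):
    # terminal-state check
    if self.terminal_state(state):
        return (None, state, self.utility(state))

    min_move_direction, min_child, min_utility = None, None, (INF_P, )
    # expand the opponent's tile placements instead of the player's moves
    for move_direction, child in self.get_opponent_children(state):
        _, state2, utility = self.maximize(child, alpha, beta)
        child.utility = utility

        if sum(utility) < sum(min_utility):
            min_move_direction, min_child, min_utility = move_direction, child, utility

        if sum(min_utility) <= alpha:
            break

        if sum(min_utility) < beta:
            beta = sum(min_utility)

    state.utility = min_utility
    state.alpha = alpha
    state.beta = beta

    return min_move_direction, min_child, min_utility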

It now reaches 1024 and 2048 most of the time.

Original question on Stack Overflow: "python - 2048 game - AI cannot get an average score above 256": https://stackoverflow.com/questions/44846875/
