arch/arm64/include/asm/fixmap.h

enum fixed_addresses {
    FIX_HOLE,

    /*
     * Reserve a virtual window for the FDT that is 2 MB larger than the
     * maximum supported size, and put it at the top of the fixmap region.
     * The additional space ensures that any FDT that does not exceed
     * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
     * 2 MB alignment boundaries.
     *
     * Keep this at the top so it remains 2 MB aligned.
     */
#define FIX_FDT_SIZE        (MAX_FDT_SIZE + SZ_2M)
    FIX_FDT_END,
    FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,

    FIX_EARLYCON_MEM_BASE,
    FIX_TEXT_POKE0,

#ifdef CONFIG_ACPI_APEI_GHES
    /* Used for GHES mapping from assorted contexts */
    FIX_APEI_GHES_IRQ,
    FIX_APEI_GHES_NMI,
#endif /* CONFIG_ACPI_APEI_GHES */

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
    FIX_ENTRY_TRAMP_DATA,
    FIX_ENTRY_TRAMP_TEXT,
#define TRAMP_VALIAS        (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
    __end_of_permanent_fixed_addresses,

    /*
     * Temporary boot-time mappings, used by early_ioremap(),
     * before ioremap() is functional.
     */
#define NR_FIX_BTMAPS        (SZ_256K / PAGE_SIZE)
#define FIX_BTMAPS_SLOTS    7
#define TOTAL_FIX_BTMAPS    (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)

    FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
    FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,

    /*
     * Used for kernel page table creation, so unmapped memory may be used
     * for tables.
     */
    FIX_PTE,
    FIX_PMD,
    FIX_PUD,
    FIX_PGD,

    __end_of_fixed_addresses
};

#define FIXADDR_SIZE    (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
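
For reference, the index-to-address conversion comes from
include/asm-generic/fixmap.h: higher enum values sit at lower virtual
addresses, growing down from FIXADDR_TOP:

#define __fix_to_virt(x)    (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)    ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

Working the FDT numbers as an example (assuming 4 KiB pages; on arm64
MAX_FDT_SIZE is SZ_2M): FIX_FDT_SIZE = 2M + 2M = 4M, i.e. 1024 slots, so
FIX_FDT = FIX_FDT_END + 1024 - 1 and the FDT window occupies the 4 MiB of
virtual space just below FIXADDR_TOP.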

arch/arm64/mm/mmu.c

void __init early_fixmap_init(void)
{
    pgd_t *pgdp, pgd;
    pud_t *pudp;
    pmd_t *pmdp;
    unsigned long addr = FIXADDR_START;

    pgdp = pgd_offset_k(addr);
    pgd = READ_ONCE(*pgdp);
    if (CONFIG_PGTABLE_LEVELS > 3 &&
        !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
        /*
         * We only end up here if the kernel mapping and the fixmap
         * share the top level pgd entry, which should only happen on
         * 16k/4 levels configurations.
         */
        BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
        pudp = pud_offset_kimg(pgdp, addr);
    } else {
        if (pgd_none(pgd))
            __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
        pudp = fixmap_pud(addr);
    }
    if (pud_none(READ_ONCE(*pudp)))
        __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
    pmdp = fixmap_pmd(addr);
    __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

    /*
     * Remainder of the function elided: it only sanity-checks that the
     * FIX_BTMAP slots land in the same pmd that bm_pte was installed into.
     */
}

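The bm_* tables that early_fixmap_init() wires up are statically allocated
in the kernel image, which is why the fixmap can be populated before any
memory allocator is up. In the kernels this excerpt is from, the same
mmu.c declares them as:

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;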
But note what paging_init() does next: it maps a slot (FIX_PGD) whose
virtual address lies below FIXADDR_START, since FIXADDR_SIZE only counts
the permanent fixed addresses.

arch/arm64/mm/mmu.c

void __init paging_init(void)
{
    pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

    map_kernel(pgdp);
    map_mem(pgdp);

    pgd_clear_fixmap();

    cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
    init_mm.pgd = swapper_pg_dir;

    memblock_free(__pa_symbol(init_pg_dir),
              __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

    memblock_allow_resize();
}

/* arch/arm64/include/asm/pgtable.h */
#define pgd_set_fixmap(addr)    ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))

/* include/asm-generic/fixmap.h */
#define __set_fixmap_offset(idx, phys, flags)                \
({                                    \
    unsigned long ________addr;                    \
    __set_fixmap(idx, phys, flags);                    \
    ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));    \
    ________addr;                            \
})

#define set_fixmap_offset(idx, phys) \
    __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
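
swapper_pg_dir lives in the kernel image and the CPU is still running on
init_pg_dir at this point, so the fixmap is used to obtain a writable alias
of it that does not depend on the kernel mapping's permissions. Expanding
the macros above by hand (a sketch of what paging_init()'s first line does,
not verbatim kernel source):

    pgd_t *pgdp;
    phys_addr_t phys = __pa_symbol(swapper_pg_dir);

    __set_fixmap(FIX_PGD, phys, FIXMAP_PAGE_NORMAL); /* install the pte         */
    pgdp = (pgd_t *)(fix_to_virt(FIX_PGD) +          /* fixed virtual slot ...  */
            (phys & (PAGE_SIZE - 1)));               /* ... plus in-page offset */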

arch/arm64/mm/mmu.c

void __set_fixmap(enum fixed_addresses idx,
                   phys_addr_t phys, pgprot_t flags)
{
    unsigned long addr = __fix_to_virt(idx);
    pte_t *ptep;

    BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

    ptep = fixmap_pte(addr); /* Question: does the ptep obtained here exceed
                              * the range that early_fixmap_init() initialized
                              * starting at FIXADDR_START? The address
                              * corresponding to FIX_PGD is not within
                              * FIXADDR_START..FIXADDR_TOP. */

    if (pgprot_val(flags)) {
        set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
    } else {
        pte_clear(&init_mm, addr, ptep);
        flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
    }
}

\arch\arm64\mm\mmu.c

static inline pte_t * fixmap_pte(unsigned long addr)
{
    return &bm_pte[pte_index(addr)];
}
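
This also answers the question raised in __set_fixmap() above: fixmap_pte()
never walks the upper levels at all; it indexes straight into bm_pte, and
bm_pte holds PTRS_PER_PTE entries, enough to back the whole pmd-sized block
(2 MiB with 4 KiB pages) that early_fixmap_init() installed it into. A slot
such as FIX_PGD, whose address lies a little below FIXADDR_START, therefore
still resolves to a valid entry as long as it stays inside that same block;
the elided checks at the end of early_fixmap_init() warn if the FIX_BTMAP
range ever spills into a different pmd. pte_index() just masks out the
position inside the block (arch/arm64/include/asm/pgtable.h):

#define pte_index(addr)    (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))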

arch/arm64/include/asm/pgtable.h

static inline void set_pte(pte_t *ptep, pte_t pte)
{
    WRITE_ONCE(*ptep, pte);

    /*
     * Only if the new pte is valid and kernel, otherwise TLB maintenance
     * or update_mmu_cache() have the necessary barriers.
     */
    if (pte_valid_not_user(pte))
        dsb(ishst);
}
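
For a kernel mapping like the fixmap, nothing downstream will issue the
barrier for us: the new entry may be dereferenced immediately after
set_pte() returns, so the dsb(ishst) is what makes the write visible to the
page-table walker. In the kernels this excerpt is from, pte_valid_not_user()
in the same header reads:

#define pte_valid_not_user(pte) \
    ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)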
