From 4bce7efec739552c58694956eb7d6ee2a95cc3de Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Tue, 8 Oct 2024 11:50:32 -0700 Subject: [PATCH 01/35] fusee/exo/ams: update with new keydata/version enums --- .../program/source/boot/secmon_boot_key_data.s | 9 +++++++-- .../program/source/boot/secmon_package2.cpp | 2 +- fusee/program/source/fusee_key_derivation.cpp | 11 ++++++++--- fusee/program/source/fusee_package2.cpp | 2 +- fusee/program/source/fusee_setup_horizon.cpp | 2 ++ fusee/program/source/fusee_stratosphere.cpp | 16 ++++++++++++++++ .../exosphere/pkg1/pkg1_key_generation.hpp | 1 + .../libexosphere/include/exosphere/pkg2.hpp | 2 +- libraries/libexosphere/source/fuse/fuse_api.cpp | 1 + .../fs/impl/fs_id_string_impl.os.generic.cpp | 5 +++-- .../include/vapours/ams/ams_api_version.h | 8 ++++---- .../include/vapours/ams/ams_target_firmware.h | 4 +++- 12 files changed, 48 insertions(+), 15 deletions(-) diff --git a/exosphere/program/source/boot/secmon_boot_key_data.s b/exosphere/program/source/boot/secmon_boot_key_data.s index b745114f17..826441314d 100644 --- a/exosphere/program/source/boot/secmon_boot_key_data.s +++ b/exosphere/program/source/boot/secmon_boot_key_data.s @@ -85,10 +85,10 @@ _ZN3ams6secmon4boot15VolatileKeyDataE: /* We can get away with only including latest because exosphere supports newer-than-expected master key in engine. */ /* TODO: Update on next change of keys. */ /* Mariko Development Master Kek Source. */ -.byte 0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED +.byte 0x65, 0x7B, 0x11, 0x46, 0x0E, 0xC2, 0x22, 0x5D, 0xB9, 0xF1, 0xF5, 0x00, 0xF9, 0x3E, 0x1F, 0x70 /* Mariko Production Master Kek Source. */ -.byte 0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2 +.byte 0x31, 0xBE, 0x25, 0xFB, 0xDB, 0xB4, 0xEE, 0x49, 0x5C, 0x77, 0x05, 0xC2, 0x36, 0x9F, 0x34, 0x80 /* Development Master Key Vectors. */ .byte 0x46, 0x22, 0xB4, 0x51, 0x9A, 0x7E, 0xA7, 0x7F, 0x62, 0xA1, 0x1F, 0x8F, 0xC5, 0x3A, 0xDB, 0xFE /* Zeroes encrypted with Master Key 00. */ @@ -109,6 +109,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE: .byte 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 /* Master key 0E encrypted with Master key 0F. */ .byte 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 /* Master key 0F encrypted with Master key 10. */ .byte 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 /* Master key 10 encrypted with Master key 11. */ +.byte 0x90, 0x64, 0xF9, 0x08, 0x29, 0x88, 0xD4, 0xDC, 0x73, 0xA4, 0xA1, 0x13, 0x9E, 0x59, 0x85, 0xA0 /* Master key 11 encrypted with Master key 12. */ /* Production Master Key Vectors. */ .byte 0x0C, 0xF0, 0x59, 0xAC, 0x85, 0xF6, 0x26, 0x65, 0xE1, 0xE9, 0x19, 0x55, 0xE6, 0xF2, 0x67, 0x3D /* Zeroes encrypted with Master Key 00. */ @@ -129,6 +130,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE: .byte 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 /* Master key 0E encrypted with Master key 0F. */ .byte 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD /* Master key 0F encrypted with Master key 10. */ .byte 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 /* Master key 10 encrypted with Master key 11. 
*/ +.byte 0x4A, 0x01, 0x3B, 0xC7, 0x44, 0x6E, 0x45, 0xBD, 0xE6, 0x5E, 0x2B, 0xEC, 0x07, 0x37, 0x52, 0x86 /* Master key 11 encrypted with Master key 12. */ /* Device Master Key Source Sources. */ .byte 0x8B, 0x4E, 0x1C, 0x22, 0x42, 0x07, 0xC8, 0x73, 0x56, 0x94, 0x08, 0x8B, 0xCC, 0x47, 0x0F, 0x5D /* 4.0.0 Device Master Key Source Source. */ @@ -146,6 +148,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE: .byte 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C /* 16.0.0 Device Master Key Source Source. */ .byte 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 /* 17.0.0 Device Master Key Source Source. */ .byte 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 /* 18.0.0 Device Master Key Source Source. */ +.byte 0x07, 0x38, 0x9A, 0xEC, 0x9C, 0xBD, 0x50, 0x4A, 0x4C, 0x1F, 0x04, 0xDA, 0x40, 0x68, 0x29, 0xE3 /* 19.0.0 Device Master Key Source Source. */ /* Development Device Master Kek Sources. */ .byte 0xD6, 0xBD, 0x9F, 0xC6, 0x18, 0x09, 0xE1, 0x96, 0x20, 0x39, 0x60, 0xD2, 0x89, 0x83, 0x31, 0x34 /* 4.0.0 Device Master Kek Source. */ @@ -163,6 +166,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE: .byte 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F /* 16.0.0 Device Master Kek Source. */ .byte 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 /* 17.0.0 Device Master Kek Source. */ .byte 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF /* 18.0.0 Device Master Kek Source. */ +.byte 0xAE, 0x78, 0x36, 0xB6, 0x91, 0xEB, 0xAF, 0x9C, 0x18, 0xF1, 0xC0, 0xD5, 0x8A, 0x0C, 0x7C, 0xA1 /* 19.0.0 Device Master Kek Source. */ /* Production Device Master Kek Sources. */ .byte 0x88, 0x62, 0x34, 0x6E, 0xFA, 0xF7, 0xD8, 0x3F, 0xE1, 0x30, 0x39, 0x50, 0xF0, 0xB7, 0x5D, 0x5D /* 4.0.0 Device Master Kek Source. */ @@ -180,3 +184,4 @@ _ZN3ams6secmon4boot15VolatileKeyDataE: .byte 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F /* 16.0.0 Device Master Kek Source. */ .byte 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E /* 17.0.0 Device Master Kek Source. */ .byte 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B /* 18.0.0 Device Master Kek Source. */ +.byte 0x9B, 0xA5, 0xFD, 0x74, 0x7F, 0xCD, 0x23, 0xD1, 0xD9, 0xBD, 0x6C, 0x51, 0x72, 0x5F, 0x3D, 0x1F /* 19.0.0 Device Master Kek Source. */ diff --git a/exosphere/program/source/boot/secmon_package2.cpp b/exosphere/program/source/boot/secmon_package2.cpp index a51e1a7122..2d6b8948e3 100644 --- a/exosphere/program/source/boot/secmon_package2.cpp +++ b/exosphere/program/source/boot/secmon_package2.cpp @@ -94,7 +94,7 @@ namespace ams::secmon::boot { } /* Check that the key generation is one that we can use. 
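
The "Master key NN encrypted with Master key NN+1" vectors above are what make a single current key sufficient: holding only the newest master key, the firmware can walk the chain backwards and recover every older generation. A rough sketch of that walk, with AesDecryptBlock128() as an illustrative stand-in for the hardware AES-ECB decrypt primitive (the name and signature here are not the actual secmon API):

    /* Derive master keys [0, newest) from master_keys[newest] via the vector table.   */
    /* AesDecryptBlock128(dst, src, key) is a hypothetical one-block AES-ECB decrypt.  */
    void DeriveOldMasterKeys(u8 master_keys[][0x10], const u8 vectors[][0x10], int newest) {
        for (int gen = newest; gen > 0; --gen) {
            /* vectors[gen] is "Master key gen-1 encrypted with Master key gen". */
            AesDecryptBlock128(master_keys[gen - 1], vectors[gen], master_keys[gen]);
        }
        /* vectors[0] is zeroes encrypted with Master Key 00: decrypting it with the */
        /* derived master_keys[0] and comparing against zero validates the chain.   */
    }
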
*/ - static_assert(pkg1::KeyGeneration_Count == 18); + static_assert(pkg1::KeyGeneration_Count == 19); if (key_generation >= pkg1::KeyGeneration_Count) { return false; } diff --git a/fusee/program/source/fusee_key_derivation.cpp b/fusee/program/source/fusee_key_derivation.cpp index cad7ca5897..3d8eb31bfd 100644 --- a/fusee/program/source/fusee_key_derivation.cpp +++ b/fusee/program/source/fusee_key_derivation.cpp @@ -23,17 +23,17 @@ namespace ams::nxboot { alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSource[se::AesBlockSize] = { /* TODO: Update on next change of keys. */ - 0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2 + 0x31, 0xBE, 0x25, 0xFB, 0xDB, 0xB4, 0xEE, 0x49, 0x5C, 0x77, 0x05, 0xC2, 0x36, 0x9F, 0x34, 0x80 }; alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSourceDev[se::AesBlockSize] = { /* TODO: Update on next change of keys. */ - 0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED + 0x65, 0x7B, 0x11, 0x46, 0x0E, 0xC2, 0x22, 0x5D, 0xB9, 0xF1, 0xF5, 0x00, 0xF9, 0x3E, 0x1F, 0x70 }; alignas(se::AesBlockSize) constexpr inline const u8 EristaMasterKekSource[se::AesBlockSize] = { /* TODO: Update on next change of keys. */ - 0x00, 0x04, 0x5D, 0xF0, 0x4D, 0xCD, 0x14, 0xA3, 0x1C, 0xBF, 0xDE, 0x48, 0x55, 0xBA, 0x35, 0xC1 + 0xD7, 0x63, 0x74, 0x46, 0x4E, 0xBA, 0x78, 0x0A, 0x7C, 0x9D, 0xB3, 0xE8, 0x7A, 0x3D, 0x71, 0xE3 }; alignas(se::AesBlockSize) constexpr inline const u8 KeyblobKeySource[se::AesBlockSize] = { @@ -72,6 +72,7 @@ namespace ams::nxboot { { 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C }, /* 16.0.0 Device Master Key Source Source. */ { 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 }, /* 17.0.0 Device Master Key Source Source. */ { 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 }, /* 18.0.0 Device Master Key Source Source. */ + { 0x07, 0x38, 0x9A, 0xEC, 0x9C, 0xBD, 0x50, 0x4A, 0x4C, 0x1F, 0x04, 0xDA, 0x40, 0x68, 0x29, 0xE3 }, /* 19.0.0 Device Master Key Source Source. */ }; alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSources[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = { @@ -90,6 +91,7 @@ namespace ams::nxboot { { 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F }, /* 16.0.0 Device Master Kek Source. */ { 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E }, /* 17.0.0 Device Master Kek Source. */ { 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B }, /* 18.0.0 Device Master Kek Source. */ + { 0x9B, 0xA5, 0xFD, 0x74, 0x7F, 0xCD, 0x23, 0xD1, 0xD9, 0xBD, 0x6C, 0x51, 0x72, 0x5F, 0x3D, 0x1F }, /* 19.0.0 Device Master Kek Source. */ }; alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSourcesDev[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = { @@ -108,6 +110,7 @@ namespace ams::nxboot { { 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F }, /* 16.0.0 Device Master Kek Source. */ { 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 }, /* 17.0.0 Device Master Kek Source. */ { 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF }, /* 18.0.0 Device Master Kek Source. 
*/ + { 0xAE, 0x78, 0x36, 0xB6, 0x91, 0xEB, 0xAF, 0x9C, 0x18, 0xF1, 0xC0, 0xD5, 0x8A, 0x0C, 0x7C, 0xA1 }, /* 19.0.0 Device Master Kek Source. */ }; alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySources[pkg1::KeyGeneration_Count][se::AesBlockSize] = { @@ -129,6 +132,7 @@ namespace ams::nxboot { { 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 }, /* Master key 0E encrypted with Master key 0F. */ { 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD }, /* Master key 0F encrypted with Master key 10. */ { 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 }, /* Master key 10 encrypted with Master key 11. */ + { 0x4A, 0x01, 0x3B, 0xC7, 0x44, 0x6E, 0x45, 0xBD, 0xE6, 0x5E, 0x2B, 0xEC, 0x07, 0x37, 0x52, 0x86 }, /* Master key 11 encrypted with Master key 12. */ }; alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySourcesDev[pkg1::KeyGeneration_Count][se::AesBlockSize] = { @@ -150,6 +154,7 @@ namespace ams::nxboot { { 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 }, /* Master key 0E encrypted with Master key 0F. */ { 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 }, /* Master key 0F encrypted with Master key 10. */ { 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 }, /* Master key 10 encrypted with Master key 11. */ + { 0x90, 0x64, 0xF9, 0x08, 0x29, 0x88, 0xD4, 0xDC, 0x73, 0xA4, 0xA1, 0x13, 0x9E, 0x59, 0x85, 0xA0 }, /* Master key 11 encrypted with Master key 12. */ }; alignas(se::AesBlockSize) constinit u8 MasterKeys[pkg1::OldMasterKeyCount][se::AesBlockSize] = {}; diff --git a/fusee/program/source/fusee_package2.cpp b/fusee/program/source/fusee_package2.cpp index 535f0f68d8..9735b0f0ba 100644 --- a/fusee/program/source/fusee_package2.cpp +++ b/fusee/program/source/fusee_package2.cpp @@ -80,7 +80,7 @@ namespace ams::nxboot { } /* Check that the key generation is one that we can use. 
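
Note that the static_assert against a literal which follows is a compile-time tripwire rather than a runtime check: the next time the enum grows, every site carrying generation-indexed key material fails to build until someone re-audits it, which is why this patch touches the same assertion in secmon_package2.cpp and fusee_package2.cpp in lockstep. The pattern in isolation:

    /* A table indexed by key generation, pinned to the size it was last audited at. */
    constexpr u8 MasterKeyVectors[pkg1::KeyGeneration_Count][0x10] = { /* ... */ };
    static_assert(pkg1::KeyGeneration_Count == 19); /* the next bump breaks the build */
                                                    /* here, flagging this table.     */
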
*/ - static_assert(pkg1::KeyGeneration_Count == 18); + static_assert(pkg1::KeyGeneration_Count == 19); if (key_generation >= pkg1::KeyGeneration_Count) { return false; } diff --git a/fusee/program/source/fusee_setup_horizon.cpp b/fusee/program/source/fusee_setup_horizon.cpp index 78c40b2165..6d0d6a3842 100644 --- a/fusee/program/source/fusee_setup_horizon.cpp +++ b/fusee/program/source/fusee_setup_horizon.cpp @@ -261,6 +261,8 @@ namespace ams::nxboot { return ams::TargetFirmware_17_0_0; } else if (std::memcmp(package1 + 0x10, "20240207", 8) == 0) { return ams::TargetFirmware_18_0_0; + } else if (std::memcmp(package1 + 0x10, "20240808", 8) == 0) { + return ams::TargetFirmware_19_0_0; } break; default: diff --git a/fusee/program/source/fusee_stratosphere.cpp b/fusee/program/source/fusee_stratosphere.cpp index a7a380d976..99fea38bd3 100644 --- a/fusee/program/source/fusee_stratosphere.cpp +++ b/fusee/program/source/fusee_stratosphere.cpp @@ -177,6 +177,9 @@ namespace ams::nxboot { FsVersion_18_1_0, FsVersion_18_1_0_Exfat, + FsVersion_19_0_0, + FsVersion_19_0_0_Exfat, + FsVersion_Count, }; @@ -266,6 +269,9 @@ namespace ams::nxboot { { 0xA3, 0x39, 0xF0, 0x1C, 0x95, 0xBF, 0xA7, 0x68 }, /* FsVersion_18_1_0 */ { 0x20, 0x4C, 0xBA, 0x86, 0xDE, 0x08, 0x44, 0x6A }, /* FsVersion_18_1_0_Exfat */ + + { 0xD9, 0x4C, 0x68, 0x15, 0xF8, 0xF5, 0x0A, 0x20 }, /* FsVersion_19_0_0 */ + { 0xED, 0xA8, 0x78, 0x68, 0xA4, 0x49, 0x07, 0x50 }, /* FsVersion_19_0_0_Exfat */ }; const InitialProcessBinaryHeader *FindInitialProcessBinary(const pkg2::Package2Header *header, const u8 *data, ams::TargetFirmware target_firmware) { @@ -645,6 +651,16 @@ namespace ams::nxboot { AddPatch(fs_meta, 0x195FD9, NogcPatch0, sizeof(NogcPatch0)); AddPatch(fs_meta, 0x16FBE0, NogcPatch1, sizeof(NogcPatch1)); break; + case FsVersion_19_0_0: + AddPatch(fs_meta, 0x195C75, NogcPatch0, sizeof(NogcPatch0)); + AddPatch(fs_meta, 0x195E75, NogcPatch0, sizeof(NogcPatch0)); + AddPatch(fs_meta, 0x16F170, NogcPatch1, sizeof(NogcPatch1)); + break; + case FsVersion_19_0_0_Exfat: + AddPatch(fs_meta, 0x1A14A5, NogcPatch0, sizeof(NogcPatch0)); + AddPatch(fs_meta, 0x1A16A5, NogcPatch0, sizeof(NogcPatch0)); + AddPatch(fs_meta, 0x17A9A0, NogcPatch1, sizeof(NogcPatch1)); + break; default: break; } diff --git a/libraries/libexosphere/include/exosphere/pkg1/pkg1_key_generation.hpp b/libraries/libexosphere/include/exosphere/pkg1/pkg1_key_generation.hpp index 0cfc436336..92aea986e6 100644 --- a/libraries/libexosphere/include/exosphere/pkg1/pkg1_key_generation.hpp +++ b/libraries/libexosphere/include/exosphere/pkg1/pkg1_key_generation.hpp @@ -38,6 +38,7 @@ namespace ams::pkg1 { KeyGeneration_16_0_0 = 0x0F, KeyGeneration_17_0_0 = 0x10, KeyGeneration_18_0_0 = 0x11, + KeyGeneration_19_0_0 = 0x12, KeyGeneration_Count, diff --git a/libraries/libexosphere/include/exosphere/pkg2.hpp b/libraries/libexosphere/include/exosphere/pkg2.hpp index 8775d2ed99..d72ce1f03a 100644 --- a/libraries/libexosphere/include/exosphere/pkg2.hpp +++ b/libraries/libexosphere/include/exosphere/pkg2.hpp @@ -24,7 +24,7 @@ namespace ams::pkg2 { constexpr inline int PayloadCount = 3; constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x18 in Nintendo's code. 
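
For context on the constants at hand: a package2 header carries both a bootloader version and a data version, Nintendo bumps them each release, and 19.0.0 moves the bootloader version to 0x16. Atmosphère tracks the new bootloader version while pinning MinimumValidDataVersion at 0 so that older package2 images remain loadable. A hedged sketch of the kind of gate these constants feed, with illustrative field names rather than the exact Package2Meta accessors:

    /* Illustrative version gate, not the verbatim header-validation logic: */
    bool IsVersionAcceptable(u8 bootloader_version, u8 data_version) {
        return bootloader_version <= CurrentBootloaderVersion   /* produced by a known bootloader  */
            && data_version       >= MinimumValidDataVersion;   /* 0 here; 0x18 in Nintendo's code */
    }
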
*/ - constexpr inline int CurrentBootloaderVersion = 0x15; + constexpr inline int CurrentBootloaderVersion = 0x16; struct Package2Meta { using Magic = util::FourCC<'P','K','2','1'>; diff --git a/libraries/libexosphere/source/fuse/fuse_api.cpp b/libraries/libexosphere/source/fuse/fuse_api.cpp index 95c316d689..e20dddcc05 100644 --- a/libraries/libexosphere/source/fuse/fuse_api.cpp +++ b/libraries/libexosphere/source/fuse/fuse_api.cpp @@ -177,6 +177,7 @@ namespace ams::fuse { } constexpr const TargetFirmware FuseVersionIncrementFirmwares[] = { + TargetFirmware_19_0_0, TargetFirmware_17_0_0, TargetFirmware_16_0_0, TargetFirmware_15_0_0, diff --git a/libraries/libstratosphere/source/fs/impl/fs_id_string_impl.os.generic.cpp b/libraries/libstratosphere/source/fs/impl/fs_id_string_impl.os.generic.cpp index 091b19d3af..5e2d9b60fd 100644 --- a/libraries/libstratosphere/source/fs/impl/fs_id_string_impl.os.generic.cpp +++ b/libraries/libstratosphere/source/fs/impl/fs_id_string_impl.os.generic.cpp @@ -21,7 +21,7 @@ namespace ams::fs::impl { #define ADD_ENUM_CASE(v) case v: return #v template<> const char *IdString::ToString(pkg1::KeyGeneration id) { - static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_18_0_0); + static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_19_0_0); switch (id) { using enum pkg1::KeyGeneration; case KeyGeneration_1_0_0: return "1.0.0-2.3.0"; @@ -41,7 +41,8 @@ namespace ams::fs::impl { case KeyGeneration_15_0_0: return "15.0.0-15.0.1"; case KeyGeneration_16_0_0: return "16.0.0-16.0.3"; case KeyGeneration_17_0_0: return "17.0.0-17.0.1"; - case KeyGeneration_18_0_0: return "18.0.0-"; + case KeyGeneration_18_0_0: return "18.0.0-18.1.0"; + case KeyGeneration_19_0_0: return "19.0.0-"; default: return "Unknown"; } } diff --git a/libraries/libvapours/include/vapours/ams/ams_api_version.h b/libraries/libvapours/include/vapours/ams/ams_api_version.h index 0e06af0fb7..978b1622ed 100644 --- a/libraries/libvapours/include/vapours/ams/ams_api_version.h +++ b/libraries/libvapours/include/vapours/ams/ams_api_version.h @@ -16,11 +16,11 @@ #pragma once #define ATMOSPHERE_RELEASE_VERSION_MAJOR 1 -#define ATMOSPHERE_RELEASE_VERSION_MINOR 7 -#define ATMOSPHERE_RELEASE_VERSION_MICRO 1 +#define ATMOSPHERE_RELEASE_VERSION_MINOR 8 +#define ATMOSPHERE_RELEASE_VERSION_MICRO 0 #define ATMOSPHERE_RELEASE_VERSION ATMOSPHERE_RELEASE_VERSION_MAJOR, ATMOSPHERE_RELEASE_VERSION_MINOR, ATMOSPHERE_RELEASE_VERSION_MICRO -#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 18 -#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 1 +#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 19 +#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 0 #define ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO 0 diff --git a/libraries/libvapours/include/vapours/ams/ams_target_firmware.h b/libraries/libvapours/include/vapours/ams/ams_target_firmware.h index 0dde6da819..484f76f8b1 100644 --- a/libraries/libvapours/include/vapours/ams/ams_target_firmware.h +++ b/libraries/libvapours/include/vapours/ams/ams_target_firmware.h @@ -82,8 +82,9 @@ #define ATMOSPHERE_TARGET_FIRMWARE_17_0_1 ATMOSPHERE_TARGET_FIRMWARE(17, 0, 1) #define ATMOSPHERE_TARGET_FIRMWARE_18_0_0 ATMOSPHERE_TARGET_FIRMWARE(18, 0, 0) #define ATMOSPHERE_TARGET_FIRMWARE_18_1_0 ATMOSPHERE_TARGET_FIRMWARE(18, 1, 0) +#define ATMOSPHERE_TARGET_FIRMWARE_19_0_0 ATMOSPHERE_TARGET_FIRMWARE(19, 0, 0) -#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_18_1_0 +#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_19_0_0 #define 
ATMOSPHERE_TARGET_FIRMWARE_MIN ATMOSPHERE_TARGET_FIRMWARE(0, 0, 0) #define ATMOSPHERE_TARGET_FIRMWARE_MAX ATMOSPHERE_TARGET_FIRMWARE_CURRENT @@ -158,6 +159,7 @@ namespace ams { TargetFirmware_17_0_1 = ATMOSPHERE_TARGET_FIRMWARE_17_0_1, TargetFirmware_18_0_0 = ATMOSPHERE_TARGET_FIRMWARE_18_0_0, TargetFirmware_18_1_0 = ATMOSPHERE_TARGET_FIRMWARE_18_1_0, + TargetFirmware_19_0_0 = ATMOSPHERE_TARGET_FIRMWARE_19_0_0, TargetFirmware_Current = ATMOSPHERE_TARGET_FIRMWARE_CURRENT, From 4793ba2caaf70c5ebc3f62f6aa0808115244c6ae Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Tue, 8 Oct 2024 12:24:41 -0700 Subject: [PATCH 02/35] erpt: add new IDs/categories --- .../stratosphere/erpt/erpt_ids.autogen.hpp | 80 +++++++++++++------ utilities/erpt.py | 3 + 2 files changed, 59 insertions(+), 24 deletions(-) diff --git a/libraries/libstratosphere/include/stratosphere/erpt/erpt_ids.autogen.hpp b/libraries/libstratosphere/include/stratosphere/erpt/erpt_ids.autogen.hpp index 92d6db7dc6..c7e5eef6bc 100644 --- a/libraries/libstratosphere/include/stratosphere/erpt/erpt_ids.autogen.hpp +++ b/libraries/libstratosphere/include/stratosphere/erpt/erpt_ids.autogen.hpp @@ -81,8 +81,8 @@ HANDLER(NetworkErrorInfo, 40 ) \ HANDLER(FileAccessPathInfo, 41 ) \ HANDLER(GameCardCIDInfo, 42 ) \ - HANDLER(NANDCIDInfo, 43 ) \ - HANDLER(MicroSDCIDInfo, 44 ) \ + HANDLER(NANDCIDInfoDeprecated, 43 ) \ + HANDLER(MicroSDCIDInfoDeprecated, 44 ) \ HANDLER(NANDSpeedModeInfo, 45 ) \ HANDLER(MicroSDSpeedModeInfo, 46 ) \ HANDLER(GameCardSpeedModeInfo, 47 ) \ @@ -112,7 +112,7 @@ HANDLER(FocusedAppletHistoryInfo, 71 ) \ HANDLER(CompositorInfo, 72 ) \ HANDLER(BatteryChargeInfo, 73 ) \ - HANDLER(NANDExtendedCsd, 74 ) \ + HANDLER(NANDExtendedCsdDeprecated, 74 ) \ HANDLER(NANDPatrolInfo, 75 ) \ HANDLER(NANDErrorInfo, 76 ) \ HANDLER(NANDDriverLog, 77 ) \ @@ -178,9 +178,12 @@ HANDLER(BuiltInWirelessOUIInfo, 137 ) \ HANDLER(WirelessAPOUIInfo, 138 ) \ HANDLER(EthernetAdapterOUIInfo, 139 ) \ - HANDLER(NANDTypeInfo, 140 ) \ + HANDLER(NANDTypeInfoDeprecated, 140 ) \ HANDLER(MicroSDTypeInfo, 141 ) \ - HANDLER(TestNx, 1000) + HANDLER(AttachmentFileInfo, 142 ) \ + HANDLER(TestNx, 1000) \ + HANDLER(NANDTypeInfo, 1001) \ + HANDLER(NANDExtendedCsd, 1002) \ #define AMS_ERPT_FOREACH_FIELD(HANDLER) \ HANDLER(TestU64, 0, Test, FieldType_NumericU64, FieldFlag_None ) \ @@ -280,8 +283,8 @@ HANDLER(CDNContentPath, 94, NetworkErrorInfo, FieldType_String, FieldFlag_None ) \ HANDLER(FileAccessPath, 95, FileAccessPathInfo, FieldType_String, FieldFlag_None ) \ HANDLER(GameCardCID, 96, GameCardCIDInfo, FieldType_U8Array, FieldFlag_None ) \ - HANDLER(NANDCID, 97, NANDCIDInfo, FieldType_U8Array, FieldFlag_None ) \ - HANDLER(MicroSDCID, 98, MicroSDCIDInfo, FieldType_U8Array, FieldFlag_None ) \ + HANDLER(NANDCIDDeprecated, 97, NANDCIDInfoDeprecated, FieldType_U8Array, FieldFlag_None ) \ + HANDLER(MicroSDCIDDeprecated, 98, MicroSDCIDInfoDeprecated, FieldType_U8Array, FieldFlag_None ) \ HANDLER(NANDSpeedMode, 99, NANDSpeedModeInfo, FieldType_String, FieldFlag_None ) \ HANDLER(MicroSDSpeedMode, 100, MicroSDSpeedModeInfo, FieldType_String, FieldFlag_None ) \ HANDLER(GameCardSpeedMode, 101, GameCardSpeedModeInfo, FieldType_String, FieldFlag_None ) \ @@ -373,9 +376,9 @@ HANDLER(FastBatteryChargingEnabled, 187, BatteryChargeInfo, FieldType_Bool, FieldFlag_None ) \ HANDLER(ControllerPowerSupplyAcquiredDeprecated, 188, BatteryChargeInfo, FieldType_Bool, FieldFlag_None ) \ HANDLER(OtgRequestedDeprecated, 189, BatteryChargeInfo, FieldType_Bool, FieldFlag_None ) \ - HANDLER(NANDPreEolInfo, 
190, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \ - HANDLER(NANDDeviceLifeTimeEstTypA, 191, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \ - HANDLER(NANDDeviceLifeTimeEstTypB, 192, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(NANDPreEolInfoDeprecated, 190, NANDExtendedCsdDeprecated, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(NANDDeviceLifeTimeEstTypADeprecated, 191, NANDExtendedCsdDeprecated, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(NANDDeviceLifeTimeEstTypBDeprecated, 192, NANDExtendedCsdDeprecated, FieldType_NumericU32, FieldFlag_None ) \ HANDLER(NANDPatrolCount, 193, NANDPatrolInfo, FieldType_NumericU32, FieldFlag_None ) \ HANDLER(NANDNumActivationFailures, 194, NANDErrorInfo, FieldType_NumericU32, FieldFlag_None ) \ HANDLER(NANDNumActivationErrorCorrections, 195, NANDErrorInfo, FieldType_NumericU32, FieldFlag_None ) \ @@ -446,21 +449,21 @@ HANDLER(AdspExceptionStackDumpDeprecated, 260, AdspErrorInfo, FieldType_U32Array, FieldFlag_None ) \ HANDLER(AdspExceptionReasonDeprecated, 261, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) \ HANDLER(OscillatorClock, 262, PowerClockInfo, FieldType_NumericU32, FieldFlag_None ) \ - HANDLER(CpuDvfsTableClocks, 263, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ - HANDLER(CpuDvfsTableVoltages, 264, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ - HANDLER(GpuDvfsTableClocks, 265, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ - HANDLER(GpuDvfsTableVoltages, 266, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ - HANDLER(EmcDvfsTableClocks, 267, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ - HANDLER(EmcDvfsTableVoltages, 268, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(CpuDvfsTableClocksDeprecated, 263, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(CpuDvfsTableVoltagesDeprecated, 264, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(GpuDvfsTableClocksDeprecated, 265, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(GpuDvfsTableVoltagesDeprecated, 266, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(EmcDvfsTableClocksDeprecated, 267, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(EmcDvfsTableVoltagesDeprecated, 268, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ HANDLER(ModuleClockFrequencies, 269, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ HANDLER(ModuleClockEnableFlags, 270, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \ HANDLER(ModulePowerEnableFlags, 271, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \ HANDLER(ModuleResetAssertFlags, 272, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \ HANDLER(ModuleMinimumVoltageClockRates, 273, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ - HANDLER(PowerDomainEnableFlags, 274, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \ - HANDLER(PowerDomainVoltages, 275, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(PowerDomainEnableFlagsDeprecated, 274, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \ + HANDLER(PowerDomainVoltagesDeprecated, 275, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ HANDLER(AccessPointRssi, 276, RadioStrengthInfo, FieldType_NumericI32, FieldFlag_None ) \ - HANDLER(FuseInfo, 277, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(FuseInfoDeprecated, 277, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ HANDLER(VideoLog, 278, VideoInfo, FieldType_String, FieldFlag_None ) \ 
HANDLER(GameCardDeviceId, 279, GameCardCIDInfo, FieldType_U8Array, FieldFlag_None ) \ HANDLER(GameCardAsicReinitializeCount, 280, GameCardErrorInfo, FieldType_NumericU16, FieldFlag_None ) \ @@ -830,9 +833,9 @@ HANDLER(RuntimeLimitedApplicationLicenseUpgrade, 644, RunningApplicationInfo, FieldType_NumericU8, FieldFlag_None ) \ HANDLER(ServiceProfileRevisionKey, 645, ServiceProfileInfo, FieldType_NumericU64, FieldFlag_None ) \ HANDLER(BluetoothAudioConnectionCount, 646, BluetoothAudioInfo, FieldType_NumericU8, FieldFlag_None ) \ - HANDLER(BluetoothHidPairingInfoCount, 647, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ - HANDLER(BluetoothAudioPairingInfoCount, 648, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ - HANDLER(BluetoothLePairingInfoCount, 649, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ + HANDLER(BluetoothHidPairingInfoCountDeprecated, 647, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ + HANDLER(BluetoothAudioPairingInfoCountDeprecated, 648, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ + HANDLER(BluetoothLePairingInfoCountDeprecated, 649, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ HANDLER(FatFsBisSystemFilePeakOpenCount, 650, FsProxyErrorInfo, FieldType_NumericU16, FieldFlag_None ) \ HANDLER(FatFsBisSystemDirectoryPeakOpenCount, 651, FsProxyErrorInfo, FieldType_NumericU16, FieldFlag_None ) \ HANDLER(FatFsBisUserFilePeakOpenCount, 652, FsProxyErrorInfo, FieldType_NumericU16, FieldFlag_None ) \ @@ -860,11 +863,24 @@ HANDLER(FatFsBisUserFatErrorNumber, 674, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \ HANDLER(FatFsBisUserFatSafeErrorNumber, 675, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \ HANDLER(GpuCrashDump2, 676, GpuCrashInfo, FieldType_U8Array, FieldFlag_None ) \ - HANDLER(NANDType, 677, NANDTypeInfo, FieldType_U8Array, FieldFlag_None ) \ + HANDLER(NANDTypeDeprecated, 677, NANDTypeInfoDeprecated, FieldType_U8Array, FieldFlag_None ) \ HANDLER(MicroSDType, 678, MicroSDTypeInfo, FieldType_U8Array, FieldFlag_None ) \ HANDLER(GameCardLastDeactivateReasonResult, 679, GameCardErrorInfo, FieldType_NumericU32, FieldFlag_None ) \ HANDLER(GameCardLastDeactivateReason, 680, GameCardErrorInfo, FieldType_NumericU8, FieldFlag_None ) \ HANDLER(InvalidErrorCode, 681, ErrorInfo, FieldType_String, FieldFlag_None ) \ + HANDLER(AppletId, 682, ApplicationInfo, FieldType_NumericI32, FieldFlag_None ) \ + HANDLER(PrevReportIdentifier, 683, ErrorInfoAuto, FieldType_String, FieldFlag_None ) \ + HANDLER(SyslogStartupTimeBase, 684, ErrorInfoAuto, FieldType_NumericI64, FieldFlag_None ) \ + HANDLER(NxdmpIsAttached, 685, AttachmentFileInfo, FieldType_Bool, FieldFlag_None ) \ + HANDLER(ScreenshotIsAttached, 686, AttachmentFileInfo, FieldType_Bool, FieldFlag_None ) \ + HANDLER(SyslogIsAttached, 687, AttachmentFileInfo, FieldType_Bool, FieldFlag_None ) \ + HANDLER(SaveSyslogResult, 688, AttachmentFileInfo, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(EncryptionKeyGeneration, 689, ErrorInfo, FieldType_NumericI32, FieldFlag_None ) \ + HANDLER(FsBufferManagerNonBlockingRetriedCount, 690, FsMemoryInfo, FieldType_NumericU64, FieldFlag_None ) \ + HANDLER(FsPooledBufferNonBlockingRetriedCount, 691, FsMemoryInfo, FieldType_NumericU64, FieldFlag_None ) \ + HANDLER(LastConnectionTestDownloadSpeed64, 692, ConnectionInfo, FieldType_NumericU64, FieldFlag_None ) \ + HANDLER(LastConnectionTestUploadSpeed64, 693, ConnectionInfo, FieldType_NumericU64, FieldFlag_None 
) \ + HANDLER(EncryptionKeyV1, 694, ErrorInfo, FieldType_U8Array, FieldFlag_None ) \ HANDLER(TestStringNx, 1000, TestNx, FieldType_String, FieldFlag_None ) \ HANDLER(BoostModeCurrentLimit, 1001, BatteryChargeInfo, FieldType_NumericI32, FieldFlag_None ) \ HANDLER(ChargeConfiguration, 1002, BatteryChargeInfo, FieldType_NumericI32, FieldFlag_None ) \ @@ -877,5 +893,21 @@ HANDLER(AdspExceptionArmModeRegisters, 1009, AdspErrorInfo, FieldType_U32Array, FieldFlag_None ) \ HANDLER(AdspExceptionStackAddress, 1010, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) \ HANDLER(AdspExceptionStackDump, 1011, AdspErrorInfo, FieldType_U32Array, FieldFlag_None ) \ - HANDLER(AdspExceptionReason, 1012, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) + HANDLER(AdspExceptionReason, 1012, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(CpuDvfsTableClocks, 1013, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(CpuDvfsTableVoltages, 1014, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(GpuDvfsTableClocks, 1015, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(GpuDvfsTableVoltages, 1016, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(EmcDvfsTableClocks, 1017, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(EmcDvfsTableVoltages, 1018, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(PowerDomainEnableFlags, 1019, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \ + HANDLER(PowerDomainVoltages, 1020, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \ + HANDLER(FuseInfo, 1021, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \ + HANDLER(NANDType, 1022, NANDTypeInfo, FieldType_U8Array, FieldFlag_None ) \ + HANDLER(BluetoothHidPairingInfoCount, 1023, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ + HANDLER(BluetoothAudioPairingInfoCount, 1024, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ + HANDLER(BluetoothLePairingInfoCount, 1025, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \ + HANDLER(NANDPreEolInfo, 1026, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(NANDDeviceLifeTimeEstTypA, 1027, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \ + HANDLER(NANDDeviceLifeTimeEstTypB, 1028, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) diff --git a/utilities/erpt.py b/utilities/erpt.py index 94d0919cd9..fb697cb130 100644 --- a/utilities/erpt.py +++ b/utilities/erpt.py @@ -238,7 +238,10 @@ 139 : 'EthernetAdapterOUIInfo', 140 : 'NANDTypeInfo', 141 : 'MicroSDTypeInfo', + 142 : 'AttachmentFileInfo', 1000 : 'TestNx', + 1001 : 'NANDTypeInfo', + 1002 : 'NANDExtendedCsd', } FIELD_TYPES = { From 34e27ee97dc0b52e80a9a4f0c4d3c2e8bd0d0f2c Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Tue, 8 Oct 2024 16:12:56 -0700 Subject: [PATCH 03/35] emummc: update for 19.0.0 --- emummc/README.md | 2 +- emummc/source/FS/FS_offsets.c | 8 ++++ emummc/source/FS/FS_versions.h | 3 ++ emummc/source/FS/offsets/1900.h | 59 +++++++++++++++++++++++++++ emummc/source/FS/offsets/1900_exfat.h | 59 +++++++++++++++++++++++++++ 5 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 emummc/source/FS/offsets/1900.h create mode 100644 emummc/source/FS/offsets/1900_exfat.h diff --git a/emummc/README.md b/emummc/README.md index f01783ba5c..cada2cdb5f 100644 --- a/emummc/README.md +++ b/emummc/README.md @@ -2,7 +2,7 @@ *A SDMMC driver replacement for Nintendo's Filesystem Services, by **m4xw*** ### Supported Horizon Versions 
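
A note on the erpt tables a few hunks back: AMS_ERPT_FOREACH_CATEGORY and AMS_ERPT_FOREACH_FIELD are X-macros, so each HANDLER(Name, Id, ...) row is stamped out by whatever macro the including site supplies, and the generated enums, ID constants, and the mirror dictionary in utilities/erpt.py must all agree on one table. A minimal sketch of the technique on a cut-down table:

    #define FOREACH_CATEGORY(HANDLER)    \
        HANDLER(TestNx,          1000)   \
        HANDLER(NANDTypeInfo,    1001)   \
        HANDLER(NANDExtendedCsd, 1002)

    /* One table, two expansions, guaranteed to stay in sync: */
    #define GEN_ENUM(NAME, ID) CategoryId_##NAME = ID,
    enum CategoryId { FOREACH_CATEGORY(GEN_ENUM) };
    #undef GEN_ENUM

    #define GEN_CASE(NAME, ID) case CategoryId_##NAME: return #NAME;
    constexpr const char *GetCategoryName(CategoryId id) {
        switch (id) { FOREACH_CATEGORY(GEN_CASE) default: return "Unknown"; }
    }
    #undef GEN_CASE
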
-**1.0.0 - 18.1.0** +**1.0.0 - 19.0.0** ## Features * Arbitrary SDMMC backend selection diff --git a/emummc/source/FS/FS_offsets.c b/emummc/source/FS/FS_offsets.c index b2059c0924..64ac345620 100644 --- a/emummc/source/FS/FS_offsets.c +++ b/emummc/source/FS/FS_offsets.c @@ -73,6 +73,8 @@ #include "offsets/1800_exfat.h" #include "offsets/1810.h" #include "offsets/1810_exfat.h" +#include "offsets/1900.h" +#include "offsets/1900_exfat.h" #include "../utils/fatal.h" #define GET_OFFSET_STRUCT_NAME(vers) g_offsets##vers @@ -157,6 +159,8 @@ DEFINE_OFFSET_STRUCT(_1800); DEFINE_OFFSET_STRUCT(_1800_EXFAT); DEFINE_OFFSET_STRUCT(_1810); DEFINE_OFFSET_STRUCT(_1810_EXFAT); +DEFINE_OFFSET_STRUCT(_1900); +DEFINE_OFFSET_STRUCT(_1900_EXFAT); const fs_offsets_t *get_fs_offsets(enum FS_VER version) { switch (version) { @@ -274,6 +278,10 @@ const fs_offsets_t *get_fs_offsets(enum FS_VER version) { return &(GET_OFFSET_STRUCT_NAME(_1810)); case FS_VER_18_1_0_EXFAT: return &(GET_OFFSET_STRUCT_NAME(_1810_EXFAT)); + case FS_VER_19_0_0: + return &(GET_OFFSET_STRUCT_NAME(_1900)); + case FS_VER_19_0_0_EXFAT: + return &(GET_OFFSET_STRUCT_NAME(_1900_EXFAT)); default: fatal_abort(Fatal_UnknownVersion); } diff --git a/emummc/source/FS/FS_versions.h b/emummc/source/FS/FS_versions.h index ec4f9ecf8e..6eebac186d 100644 --- a/emummc/source/FS/FS_versions.h +++ b/emummc/source/FS/FS_versions.h @@ -107,6 +107,9 @@ enum FS_VER FS_VER_18_1_0, FS_VER_18_1_0_EXFAT, + FS_VER_19_0_0, + FS_VER_19_0_0_EXFAT, + FS_VER_MAX, }; diff --git a/emummc/source/FS/offsets/1900.h b/emummc/source/FS/offsets/1900.h new file mode 100644 index 0000000000..82ffd0f94a --- /dev/null +++ b/emummc/source/FS/offsets/1900.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 m4xw + * Copyright (c) 2019 Atmosphere-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef __FS_1900_H__ +#define __FS_1900_H__ + +// Accessor vtable getters +#define FS_OFFSET_1900_SDMMC_ACCESSOR_GC 0x195C00 +#define FS_OFFSET_1900_SDMMC_ACCESSOR_SD 0x197F80 +#define FS_OFFSET_1900_SDMMC_ACCESSOR_NAND 0x1963B0 + +// Hooks +#define FS_OFFSET_1900_SDMMC_WRAPPER_READ 0x191A70 +#define FS_OFFSET_1900_SDMMC_WRAPPER_WRITE 0x191AD0 +#define FS_OFFSET_1900_RTLD 0x275F0 +#define FS_OFFSET_1900_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x50))) + +#define FS_OFFSET_1900_CLKRST_SET_MIN_V_CLK_RATE 0x1B3880 + +// Misc funcs +#define FS_OFFSET_1900_LOCK_MUTEX 0x18AC20 +#define FS_OFFSET_1900_UNLOCK_MUTEX 0x18AC70 + +#define FS_OFFSET_1900_SDMMC_WRAPPER_CONTROLLER_OPEN 0x191A30 +#define FS_OFFSET_1900_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x191A50 + +// Misc Data +#define FS_OFFSET_1900_SD_MUTEX 0xFE1408 +#define FS_OFFSET_1900_NAND_MUTEX 0xFDCB60 +#define FS_OFFSET_1900_ACTIVE_PARTITION 0xFDCBA0 +#define FS_OFFSET_1900_SDMMC_DAS_HANDLE 0xFC1908 + +// NOPs +#define FS_OFFSET_1900_SD_DAS_INIT 0x260C4 + +// Nintendo Paths +#define FS_OFFSET_1900_NINTENDO_PATHS \ +{ \ + {.opcode_reg = 3, .adrp_offset = 0x00067FC8, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 3, .adrp_offset = 0x00075D6C, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 4, .adrp_offset = 0x0007D1E8, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 4, .adrp_offset = 0x00092818, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \ +} + +#endif // __FS_1900_H__ diff --git a/emummc/source/FS/offsets/1900_exfat.h b/emummc/source/FS/offsets/1900_exfat.h new file mode 100644 index 0000000000..2bbf0c8994 --- /dev/null +++ b/emummc/source/FS/offsets/1900_exfat.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 m4xw + * Copyright (c) 2019 Atmosphere-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef __FS_1900_EXFAT_H__ +#define __FS_1900_EXFAT_H__ + +// Accessor vtable getters +#define FS_OFFSET_1900_EXFAT_SDMMC_ACCESSOR_GC 0x1A1430 +#define FS_OFFSET_1900_EXFAT_SDMMC_ACCESSOR_SD 0x1A37B0 +#define FS_OFFSET_1900_EXFAT_SDMMC_ACCESSOR_NAND 0x1A1BE0 + +// Hooks +#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_READ 0x19D2A0 +#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_WRITE 0x19D300 +#define FS_OFFSET_1900_EXFAT_RTLD 0x275F0 +#define FS_OFFSET_1900_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x50))) + +#define FS_OFFSET_1900_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1BF0B0 + +// Misc funcs +#define FS_OFFSET_1900_EXFAT_LOCK_MUTEX 0x196450 +#define FS_OFFSET_1900_EXFAT_UNLOCK_MUTEX 0x1964A0 + +#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x19D260 +#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x19D280 + +// Misc Data +#define FS_OFFSET_1900_EXFAT_SD_MUTEX 0xFF4408 +#define FS_OFFSET_1900_EXFAT_NAND_MUTEX 0xFEFB60 +#define FS_OFFSET_1900_EXFAT_ACTIVE_PARTITION 0xFEFBA0 +#define FS_OFFSET_1900_EXFAT_SDMMC_DAS_HANDLE 0xFCF908 + +// NOPs +#define FS_OFFSET_1900_EXFAT_SD_DAS_INIT 0x260C4 + +// Nintendo Paths +#define FS_OFFSET_1900_EXFAT_NINTENDO_PATHS \ +{ \ + {.opcode_reg = 3, .adrp_offset = 0x00067FC8, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 3, .adrp_offset = 0x00075D6C, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 4, .adrp_offset = 0x0007D1E8, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 4, .adrp_offset = 0x00092818, .add_rel_offset = 0x00000004}, \ + {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \ +} + +#endif // __FS_1900_EXFAT_H__ From 483d06ac0e2229776c0e3a48dffe5469e6432e99 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 11:04:43 -0700 Subject: [PATCH 04/35] kern: add InfoType_TransferMemoryHint --- .../mesosphere/kern_k_transfer_memory.hpp | 16 ++++++++++++++++ .../libmesosphere/source/svc/kern_svc_info.cpp | 13 +++++++++++++ .../include/vapours/svc/svc_types_common.hpp | 2 ++ 3 files changed, 31 insertions(+) diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp index cd88f3abf1..c34ceb8c36 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp @@ -48,6 +48,22 @@ namespace ams::kern { KProcess *GetOwner() const { return m_owner; } KProcessAddress GetSourceAddress() { return m_address; } size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; } + + constexpr uintptr_t GetHint() const { + /* Get memory size. */ + const size_t size = this->GetSize(); + + /* TODO: Is this architecture specific? */ + if (size >= 2_MB) { + return GetInteger(m_address) & (2_MB - 1); + } else if (size >= 64_KB) { + return GetInteger(m_address) & (64_KB - 1); + } else if (size >= 4_KB) { + return GetInteger(m_address) & (4_KB - 1); + } else { + return 0; + } + } }; } diff --git a/libraries/libmesosphere/source/svc/kern_svc_info.cpp b/libraries/libmesosphere/source/svc/kern_svc_info.cpp index e9153edbb6..ec41ef59ae 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_info.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_info.cpp @@ -314,6 +314,19 @@ namespace ams::kern::svc { *out = io_region->GetHint(); } break; + case ams::svc::InfoType_TransferMemoryHint: + { + /* Verify the sub-type is valid. 
*/ + R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination()); + + /* Get the transfer memory from its handle. */ + KScopedAutoObject transfer_memory = GetCurrentProcess().GetHandleTable().GetObject(handle); + R_UNLESS(transfer_memory.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the transfer memory's address hint. */ + *out = transfer_memory->GetHint(); + } + break; case ams::svc::InfoType_MesosphereMeta: { /* Verify the handle is invalid. */ diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 5668cd2dd8..0247f59b90 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -191,6 +191,8 @@ namespace ams::svc { InfoType_IsSvcPermitted = 26, InfoType_IoRegionHint = 27, InfoType_AliasRegionExtraSize = 28, + /* ... */ + InfoType_TransferMemoryHint = 34, InfoType_MesosphereMeta = 65000, InfoType_MesosphereCurrentProcess = 65001, From 456b88ed9a9d2a84f36ed3a21867fc6b4b281f49 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 11:36:17 -0700 Subject: [PATCH 05/35] kern/svc: update WaitForAddress to support 64-bit WaitIfEqual --- .../arm64/kern_userspace_memory_access.hpp | 1 + .../mesosphere/kern_k_address_arbiter.hpp | 11 +-- .../include/mesosphere/kern_k_process.hpp | 2 +- .../arm64/kern_userspace_memory_access_asm.s | 14 ++++ .../arm64/svc/kern_svc_address_arbiter_asm.s | 67 +++++++++++++++++++ .../source/arch/arm64/svc/kern_svc_tables.cpp | 8 +++ .../source/kern_k_address_arbiter.cpp | 51 ++++++++++++++ .../source/svc/kern_svc_address_arbiter.cpp | 17 +++-- .../svc/svc_stratosphere_shims.hpp | 2 +- .../vapours/svc/svc_definition_macro.hpp | 2 +- .../include/vapours/svc/svc_types_common.hpp | 1 + 11 files changed, 163 insertions(+), 13 deletions(-) create mode 100644 libraries/libmesosphere/source/arch/arm64/svc/kern_svc_address_arbiter_asm.s diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp index 202be5f515..9e0c1844eb 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp @@ -25,6 +25,7 @@ namespace ams::kern::arch::arm64 { static bool CopyMemoryFromUser(void *dst, const void *src, size_t size); static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size); static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size); + static bool CopyMemoryFromUserSize64Bit(void *dst, const void *src); static bool CopyMemoryFromUserSize32Bit(void *dst, const void *src); static s32 CopyStringFromUser(void *dst, const void *src, size_t size); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp index 4066de80d3..f4ef74e6bf 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp @@ -39,14 +39,16 @@ namespace ams::kern { } } - Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s32 value, s64 timeout) { + Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s64 value, s64 timeout) { switch (type) { case ams::svc::ArbitrationType_WaitIfLessThan: - 
R_RETURN(this->WaitIfLessThan(addr, value, false, timeout)); + R_RETURN(this->WaitIfLessThan(addr, static_cast(value), false, timeout)); case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan: - R_RETURN(this->WaitIfLessThan(addr, value, true, timeout)); + R_RETURN(this->WaitIfLessThan(addr, static_cast(value), true, timeout)); case ams::svc::ArbitrationType_WaitIfEqual: - R_RETURN(this->WaitIfEqual(addr, value, timeout)); + R_RETURN(this->WaitIfEqual(addr, static_cast(value), timeout)); + case ams::svc::ArbitrationType_WaitIfEqual64: + R_RETURN(this->WaitIfEqual64(addr, value, timeout)); MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } @@ -56,6 +58,7 @@ namespace ams::kern { Result SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count); Result WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout); Result WaitIfEqual(uintptr_t addr, s32 value, s64 timeout); + Result WaitIfEqual64(uintptr_t addr, s64 value, s64 timeout); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index 3258d41e2b..5a37e3a6b6 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -360,7 +360,7 @@ namespace ams::kern { R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count)); } - Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s32 value, s64 timeout) { + Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s64 value, s64 timeout) { R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout)); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s index 5b1fd4a51b..e697fde3c9 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s +++ b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s @@ -126,6 +126,20 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm: mov x0, #1 ret +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize64Bit(void *dst, const void *src) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv +.type _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv: + /* Just load and store a u64. */ + ldtr x2, [x1] + str x2, [x0] + + /* We're done. 
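
One detail worth flagging in the block above: the load uses ldtr, the unprivileged-load encoding, rather than a plain ldr. The access is therefore permission-checked as if it came from EL0, so a bad user pointer faults as a user access instead of giving the kernel a read of arbitrary memory, and mesosphere's existing userspace-access fault handling can fail the copy cleanly (that recovery path is pre-existing machinery, not shown here). The C++ caller this patch adds wraps the primitive as a simple bool:

    /* From kern_k_address_arbiter.cpp later in this patch: */
    ALWAYS_INLINE bool ReadFromUser(s64 *out, KProcessAddress address) {
        return UserspaceAccess::CopyMemoryFromUserSize64Bit(out, GetVoidPointer(address));
    }
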
*/ + mov x0, #1 + ret + /* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32Bit(void *dst, const void *src) */ .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv, "ax", %progbits .global _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_address_arbiter_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_address_arbiter_asm.s new file mode 100644 index 0000000000..f82db4454f --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_address_arbiter_asm.s @@ -0,0 +1,67 @@ +/* + * Copyright (c) Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::svc::CallWaitForAddress64From32() */ +.section .text._ZN3ams4kern3svc26CallWaitForAddress64From32Ev, "ax", %progbits +.global _ZN3ams4kern3svc26CallWaitForAddress64From32Ev +.type _ZN3ams4kern3svc26CallWaitForAddress64From32Ev, %function +_ZN3ams4kern3svc26CallWaitForAddress64From32Ev: + /* Save LR + callee-save registers. */ + str x30, [sp, #-16]! + stp x6, x7, [sp, #-16]! + + /* Gather the arguments into correct registers. */ + /* NOTE: This has to be manually implemented via asm, */ + /* in order to avoid breaking ABI with pre-19.0.0. */ + orr x2, x2, x5, lsl#32 + orr x3, x3, x4, lsl#32 + + /* Invoke the svc handler. */ + bl _ZN3ams4kern3svc22WaitForAddress64From32ENS_3svc7AddressENS2_15ArbitrationTypeEll + + /* Clean up registers. */ + mov x1, xzr + mov x2, xzr + mov x3, xzr + mov x4, xzr + mov x5, xzr + + ldp x6, x7, [sp], #0x10 + ldr x30, [sp], #0x10 + ret + +/* ams::kern::svc::CallWaitForAddress64() */ +.section .text._ZN3ams4kern3svc20CallWaitForAddress64Ev, "ax", %progbits +.global _ZN3ams4kern3svc20CallWaitForAddress64Ev +.type _ZN3ams4kern3svc20CallWaitForAddress64Ev, %function +_ZN3ams4kern3svc20CallWaitForAddress64Ev: + /* Save LR + FP. */ + stp x29, x30, [sp, #-16]! + + /* Invoke the svc handler. */ + bl _ZN3ams4kern3svc22WaitForAddress64From32ENS_3svc7AddressENS2_15ArbitrationTypeEll + + /* Clean up registers. */ + mov x1, xzr + mov x2, xzr + mov x3, xzr + mov x4, xzr + mov x5, xzr + mov x6, xzr + mov x7, xzr + + ldp x29, x30, [sp], #0x10 + ret diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp index cb91518234..cfc0cb7c58 100644 --- a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp @@ -36,6 +36,10 @@ namespace ams::kern::svc { /* Declare special prototype for (unsupported) CallCallSecureMonitor64From32. */ void CallCallSecureMonitor64From32(); + /* Declare special prototypes for WaitForAddress. 
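
The hand-assembled shims above exist because the aarch32 SVC ABI has no 64-bit registers: the new s64 value and the s64 timeout each arrive split across two 32-bit registers, and CallWaitForAddress64From32 re-joins the halves (orr x2, x2, x5, lsl#32) before calling the shared handler, precisely so the register layout stays compatible with pre-19.0.0 callers. What the two orr instructions compute, with wN standing for the low 32 bits of register xN:

    /* value arrives as { lo: w2, hi: w5 }, timeout as { lo: w3, hi: w4 }: */
    const int64_t value   = static_cast<int64_t>(static_cast<uint64_t>(w2) | (static_cast<uint64_t>(w5) << 32));
    const int64_t timeout = static_cast<int64_t>(static_cast<uint64_t>(w3) | (static_cast<uint64_t>(w4) << 32));
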
*/ + void CallWaitForAddress64(); + void CallWaitForAddress64From32(); + namespace { #ifndef MESOSPHERE_USE_STUBBED_SVC_TABLES @@ -81,6 +85,8 @@ namespace ams::kern::svc { table[svc::SvcId_CallSecureMonitor] = CallCallSecureMonitor64From32; + table[svc::SvcId_WaitForAddress] = CallWaitForAddress64From32; + return table; }(); @@ -97,6 +103,8 @@ namespace ams::kern::svc { table[svc::SvcId_ReturnFromException] = CallReturnFromException64; + table[svc::SvcId_WaitForAddress] = CallWaitForAddress64; + return table; }(); diff --git a/libraries/libmesosphere/source/kern_k_address_arbiter.cpp b/libraries/libmesosphere/source/kern_k_address_arbiter.cpp index c084c90533..66c0c1645f 100644 --- a/libraries/libmesosphere/source/kern_k_address_arbiter.cpp +++ b/libraries/libmesosphere/source/kern_k_address_arbiter.cpp @@ -23,6 +23,10 @@ namespace ams::kern { return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address)); } + ALWAYS_INLINE bool ReadFromUser(s64 *out, KProcessAddress address) { + return UserspaceAccess::CopyMemoryFromUserSize64Bit(out, GetVoidPointer(address)); + } + ALWAYS_INLINE bool DecrementIfLessThan(s32 *out, KProcessAddress address, s32 value) { /* NOTE: If scheduler lock is not held here, interrupt disable is required. */ /* KScopedInterruptDisable di; */ @@ -279,4 +283,51 @@ namespace ams::kern { R_RETURN(cur_thread->GetWaitResult()); } + Result KAddressArbiter::WaitIfEqual64(uintptr_t addr, s64 value, s64 timeout) { + /* Prepare to wait. */ + KThread *cur_thread = GetCurrentThreadPointer(); + KHardwareTimer *timer; + ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree)); + + { + KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout); + + /* Check that the thread isn't terminating. */ + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + R_THROW(svc::ResultTerminationRequested()); + } + + /* Read the value from userspace. */ + s64 user_value; + if (!ReadFromUser(std::addressof(user_value), addr)) { + slp.CancelSleep(); + R_THROW(svc::ResultInvalidCurrentMemory()); + } + + /* Check that the value is equal. */ + if (value != user_value) { + slp.CancelSleep(); + R_THROW(svc::ResultInvalidState()); + } + + /* Check that the timeout is non-zero. */ + if (timeout == 0) { + slp.CancelSleep(); + R_THROW(svc::ResultTimedOut()); + } + + /* Set the arbiter. */ + cur_thread->SetAddressArbiter(std::addressof(m_tree), addr); + m_tree.insert(*cur_thread); + + /* Wait for the thread to finish. */ + wait_queue.SetHardwareTimer(timer); + cur_thread->BeginWait(std::addressof(wait_queue)); + } + + /* Get the wait result. */ + R_RETURN(cur_thread->GetWaitResult()); + } + } diff --git a/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp b/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp index 68eb61e843..2b495ad452 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp @@ -41,17 +41,22 @@ namespace ams::kern::svc { case ams::svc::ArbitrationType_WaitIfLessThan: case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan: case ams::svc::ArbitrationType_WaitIfEqual: + case ams::svc::ArbitrationType_WaitIfEqual64: return true; default: return false; } } - Result WaitForAddress(uintptr_t address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { + Result WaitForAddress(uintptr_t address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) { /* Validate input. 
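
WaitIfEqual64 above follows the classic futex discipline: the userspace value is read and compared only after KScopedSchedulerLockAndSleep has taken the scheduler lock, so a concurrent SignalToAddress cannot slip in between the comparison and the sleep and leave the waiter stranded (the lost-wakeup problem). The same discipline expressed with standard primitives, as a self-contained analogy rather than kernel code:

    #include <mutex>
    #include <condition_variable>
    #include <cstdint>

    std::mutex              g_lock;     /* plays the role of the scheduler lock */
    std::condition_variable g_cv;
    int64_t                 g_value = 0;

    void WaitIfEqual(int64_t expected) {
        std::unique_lock lk(g_lock);
        if (g_value != expected) {      /* re-check under the lock the signaler also takes */
            return;                     /* analogue of svc::ResultInvalidState()           */
        }
        g_cv.wait(lk);                  /* sleep; the lock is released atomically          */
    }

    void SignalAll(int64_t new_value) {
        { std::scoped_lock lk(g_lock); g_value = new_value; }
        g_cv.notify_all();
    }
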
*/ - R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory()); - R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress()); - R_UNLESS(IsValidArbitrationType(arb_type), svc::ResultInvalidEnumValue()); + R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory()); + if (arb_type == ams::svc::ArbitrationType_WaitIfEqual64) { + R_UNLESS(util::IsAligned(address, sizeof(int64_t)), svc::ResultInvalidAddress()); + } else { + R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress()); + } + R_UNLESS(IsValidArbitrationType(arb_type), svc::ResultInvalidEnumValue()); /* Convert timeout from nanoseconds to ticks. */ s64 timeout; @@ -85,7 +90,7 @@ namespace ams::kern::svc { /* ============================= 64 ABI ============================= */ - Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { + Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) { R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns)); } @@ -95,7 +100,7 @@ namespace ams::kern::svc { /* ============================= 64From32 ABI ============================= */ - Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { + Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) { R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns)); } diff --git a/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp b/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp index b724ce5f1c..d68a56f33f 100644 --- a/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp +++ b/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp @@ -231,7 +231,7 @@ R_RETURN(::svcGetThreadContext3(reinterpret_cast<::ThreadContext *>(out_context.GetPointerUnsafe()), thread_handle)); } - ALWAYS_INLINE Result WaitForAddress(::ams::svc::Address address, ::ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { + ALWAYS_INLINE Result WaitForAddress(::ams::svc::Address address, ::ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) { R_RETURN(::svcWaitForAddress(reinterpret_cast(static_cast(address)), arb_type, value, timeout_ns)); } diff --git a/libraries/libvapours/include/vapours/svc/svc_definition_macro.hpp b/libraries/libvapours/include/vapours/svc/svc_definition_macro.hpp index 0f1c577836..7e4121aa38 100644 --- a/libraries/libvapours/include/vapours/svc/svc_definition_macro.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_definition_macro.hpp @@ -67,7 +67,7 @@ HANDLER(0x31, Result, GetResourceLimitCurrentValue, OUTPUT(int64_t, out_current_value), INPUT(::ams::svc::Handle, resource_limit_handle), INPUT(::ams::svc::LimitableResource, which)) \ HANDLER(0x32, Result, SetThreadActivity, INPUT(::ams::svc::Handle, thread_handle), INPUT(::ams::svc::ThreadActivity, thread_activity)) \ HANDLER(0x33, Result, GetThreadContext3, OUTPTR(::ams::svc::ThreadContext, out_context), INPUT(::ams::svc::Handle, thread_handle)) \ - HANDLER(0x34, Result, WaitForAddress, INPUT(::ams::svc::Address, address), INPUT(::ams::svc::ArbitrationType, arb_type), INPUT(int32_t, value), INPUT(int64_t, timeout_ns)) \ + HANDLER(0x34, Result, WaitForAddress, INPUT(::ams::svc::Address, 
From 93d4656d0b07f951852b5ed0fcc9fec37bf634db Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Wed, 9 Oct 2024 12:03:21 -0700
Subject: [PATCH 06/35] kern: KAddressSpaceInfo now takes CreateProcessFlags in getters

---
 .../mesosphere/kern_k_address_space_info.hpp  |  4 +--
 .../source/kern_k_address_space_info.cpp      | 26 +++++++++++---
 .../source/kern_k_initial_process_reader.cpp  | 35 ++++++++++---------
 .../source/kern_k_page_table_base.cpp         |  4 +--
 .../source/svc/kern_svc_process.cpp           | 14 ++++----
 5 files changed, 52 insertions(+), 31 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp
index 9e20dd2543..8af8404cc9 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp
@@ -37,8 +37,8 @@ namespace ams::kern {
             size_t m_size;
             Type m_type;
         public:
-            static uintptr_t GetAddressSpaceStart(size_t width, Type type);
-            static size_t GetAddressSpaceSize(size_t width, Type type);
+            static uintptr_t GetAddressSpaceStart(ams::svc::CreateProcessFlag flags, Type type);
+            static size_t GetAddressSpaceSize(ams::svc::CreateProcessFlag flags, Type type);

             static void SetAddressSpaceSize(size_t width, Type type, size_t size);

diff --git a/libraries/libmesosphere/source/kern_k_address_space_info.cpp b/libraries/libmesosphere/source/kern_k_address_space_info.cpp
index ef585eba8f..b23f6e9296 100644
--- a/libraries/libmesosphere/source/kern_k_address_space_info.cpp
+++ b/libraries/libmesosphere/source/kern_k_address_space_info.cpp
@@ -37,6 +37,24 @@ namespace ams::kern {
         { 39, Invalid, ams::svc::AddressMemoryRegionStack39Size, KAddressSpaceInfo::Type_Stack, },
     };

+    constexpr u8 FlagsToAddressSpaceWidthTable[4] = {
+        32, 36, 32, 39
+    };
+
+    constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag flags) {
+        /* Convert the input flags to an array index. */
+        const size_t idx = (flags & ams::svc::CreateProcessFlag_AddressSpaceMask) >> ams::svc::CreateProcessFlag_AddressSpaceShift;
+        MESOSPHERE_ABORT_UNLESS(idx < sizeof(FlagsToAddressSpaceWidthTable));
+
+        /* Return the width. 
*/
+        return FlagsToAddressSpaceWidthTable[idx];
+    }
+
+    static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace32Bit) == 32);
+    static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated) == 36);
+    static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) == 32);
+    static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace64Bit) == 39);
+
     KAddressSpaceInfo &GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Type type) {
         for (auto &info : AddressSpaceInfos) {
             if (info.GetWidth() == width && info.GetType() == type) {
@@ -48,12 +66,12 @@
     }

-    uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) {
-        return GetAddressSpaceInfo(width, type).GetAddress();
+    uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(ams::svc::CreateProcessFlag flags, KAddressSpaceInfo::Type type) {
+        return GetAddressSpaceInfo(GetAddressSpaceWidth(flags), type).GetAddress();
     }

-    size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) {
-        return GetAddressSpaceInfo(width, type).GetSize();
+    size_t KAddressSpaceInfo::GetAddressSpaceSize(ams::svc::CreateProcessFlag flags, KAddressSpaceInfo::Type type) {
+        return GetAddressSpaceInfo(GetAddressSpaceWidth(flags), type).GetSize();
     }

     void KAddressSpaceInfo::SetAddressSpaceSize(size_t width, Type type, size_t size) {

diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
index c5ef13ac55..9c91889f9d 100644
--- a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
+++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
@@ -193,40 +193,25 @@ namespace ams::kern {
             R_UNLESS(this->Is64Bit(), svc::ResultInvalidCombination());
         }

-        using ASType = KAddressSpaceInfo::Type;
-
         const uintptr_t start_address = rx_address;
         const uintptr_t end_address   = bss_size > 0 ? bss_address + bss_size : rw_address + rw_size;
-        const size_t as_width         = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? 39 : 36) : 32;
-        const ASType as_type          = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? KAddressSpaceInfo::Type_Map39Bit : KAddressSpaceInfo::Type_MapSmall) : KAddressSpaceInfo::Type_MapSmall;
-        const uintptr_t map_start     = KAddressSpaceInfo::GetAddressSpaceStart(as_width, as_type);
-        const size_t map_size         = KAddressSpaceInfo::GetAddressSpaceSize(as_width, as_type);
-        const uintptr_t map_end       = map_start + map_size;

         MESOSPHERE_ABORT_UNLESS(start_address == 0);

         /* Default fields in parameter to zero. */
         *out = {};

         /* Set fields in parameter. */
-        out->code_address              = map_start + start_address;
+        out->code_address              = 0;
         out->code_num_pages            = util::AlignUp(end_address - start_address, PageSize) / PageSize;
         out->program_id                = m_kip_header.GetProgramId();
         out->version                   = m_kip_header.GetVersion();
         out->flags                     = 0;
         out->reslimit                  = ams::svc::InvalidHandle;
         out->system_resource_num_pages = 0;
-        MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));

         /* Copy name field. */
         m_kip_header.GetName(out->name, sizeof(out->name));

-        /* Apply ASLR, if needed. */
-        if (enable_aslr) {
-            const size_t choices = (map_end / KernelAslrAlignment) - (util::AlignUp(out->code_address + out->code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment);
-            out->code_address += KSystemControl::GenerateRandomRange(0, choices) * KernelAslrAlignment;
-            out->flags |= ams::svc::CreateProcessFlag_EnableAslr;
-        }
-
         /* Apply other flags. */
         if (this->Is64Bit()) {
             out->flags |= ams::svc::CreateProcessFlag_Is64Bit;
@@ -236,10 +221,28 @@
         } else {
             out->flags |= ams::svc::CreateProcessFlag_AddressSpace32Bit;
         }
+        if (enable_aslr) {
+            out->flags |= ams::svc::CreateProcessFlag_EnableAslr;
+        }

         /* All initial processes should disable device address space merge. */
         out->flags |= ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge;

+        /* Set and check code address. */
+        using ASType = KAddressSpaceInfo::Type;
+        const ASType as_type      = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? KAddressSpaceInfo::Type_Map39Bit : KAddressSpaceInfo::Type_MapSmall) : KAddressSpaceInfo::Type_MapSmall;
+        const uintptr_t map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(out->flags), as_type);
+        const size_t map_size     = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(out->flags), as_type);
+        const uintptr_t map_end   = map_start + map_size;
+        out->code_address = map_start + start_address;
+        MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));
+
+        /* Apply ASLR, if needed. */
+        if (enable_aslr) {
+            const size_t choices = (map_end / KernelAslrAlignment) - (util::AlignUp(out->code_address + out->code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment);
+            out->code_address += KSystemControl::GenerateRandomRange(0, choices) * KernelAslrAlignment;
+        }
+
         R_SUCCEED();
     }

diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index 2e90d924e5..613dc0dbb9 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -141,10 +141,10 @@ namespace ams::kern {

         /* Define helpers. */
         auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
-            return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
+            return KAddressSpaceInfo::GetAddressSpaceStart(flags, type);
         };
         auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
-            return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
+            return KAddressSpaceInfo::GetAddressSpaceSize(flags, type);
         };

         /* Default to zero alias region extra size. */
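As a worked example of the ASLR hunk above: the slide is chosen in whole units of KernelAslrAlignment, between the end of the mapped code and the end of the map region. A standalone sketch of that arithmetic follows; the constants are invented for illustration (the real values come from KAddressSpaceInfo and KSystemControl):

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr uint64_t PageSize  = 0x1000;
        constexpr uint64_t AslrAlign = 0x200000;      /* stand-in for KernelAslrAlignment */

        const uint64_t map_end        = 0x8000000000; /* illustrative map region end */
        const uint64_t code_address   = 0x0800000000; /* illustrative code base      */
        const uint64_t code_num_pages = 0x1000;

        /* Number of aligned slots between the end of code and the end of the region.  */
        /* AlignUp(x, a) / a is written as (x + a - 1) / a here.                        */
        const uint64_t code_end = code_address + code_num_pages * PageSize;
        const uint64_t choices  = (map_end / AslrAlign) - ((code_end + AslrAlign - 1) / AslrAlign);

        std::printf("ASLR can slide code by 0..%llu slots of 0x%llx bytes\n",
                    (unsigned long long)choices, (unsigned long long)AslrAlign);
    }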
diff --git a/libraries/libmesosphere/source/svc/kern_svc_process.cpp b/libraries/libmesosphere/source/svc/kern_svc_process.cpp
index 76e36302bd..5141a2c312 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_process.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_process.cpp
@@ -110,8 +110,8 @@ namespace ams::kern::svc {
                 case ams::svc::CreateProcessFlag_AddressSpace32Bit:
                 case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
                     {
-                        map_start = KAddressSpaceInfo::GetAddressSpaceStart(32, KAddressSpaceInfo::Type_MapSmall);
-                        map_size  = KAddressSpaceInfo::GetAddressSpaceSize(32, KAddressSpaceInfo::Type_MapSmall);
+                        map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
+                        map_size  = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
                         map_end   = map_start + map_size;
                     }
                     break;
@@ -120,8 +120,8 @@
                         /* 64-bit address space requires 64-bit process. */
                         R_UNLESS(is_64_bit, svc::ResultInvalidCombination());

-                        map_start = KAddressSpaceInfo::GetAddressSpaceStart(36, KAddressSpaceInfo::Type_MapSmall);
-                        map_size  = KAddressSpaceInfo::GetAddressSpaceSize(36, KAddressSpaceInfo::Type_MapSmall);
+                        map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
+                        map_size  = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
                         map_end   = map_start + map_size;
                     }
                     break;
@@ -130,10 +130,10 @@
                         /* 64-bit address space requires 64-bit process. */
                        R_UNLESS(is_64_bit, svc::ResultInvalidCombination());

-                        map_start = KAddressSpaceInfo::GetAddressSpaceStart(39, KAddressSpaceInfo::Type_Map39Bit);
-                        map_end   = map_start + KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type_Map39Bit);
+                        map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_Map39Bit);
+                        map_end   = map_start + KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_Map39Bit);

-                        map_size  = KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type_Heap);
+                        map_size  = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_Heap);
                     }
                     break;
                 default:
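The switch in kern_svc_process.cpp above now keys every region lookup off the caller's flags rather than a hard-coded width. A reduced, compilable sketch of that dispatch shape; the enum encoding follows ams::svc's CreateProcessFlag bits as an assumption, and the region numbers are invented for illustration:

    #include <cstdint>
    #include <cstdio>

    enum Flag : uint32_t {
        Flag_AddressSpace32Bit             = 0u << 1,
        Flag_AddressSpace64BitDeprecated   = 1u << 1,
        Flag_AddressSpace32BitWithoutAlias = 2u << 1,
        Flag_AddressSpace64Bit             = 3u << 1,
        Flag_AddressSpaceMask              = 7u << 1,
    };

    struct Region { uint64_t start, size; };

    /* Invented numbers, standing in for the KAddressSpaceInfo lookups above. */
    Region GetMapRegion(uint32_t flags) {
        switch (flags & Flag_AddressSpaceMask) {
            case Flag_AddressSpace32Bit:
            case Flag_AddressSpace32BitWithoutAlias: return { 0x00200000,   0x3FE00000   };
            case Flag_AddressSpace64BitDeprecated:   return { 0x08000000,   0x78000000   };
            case Flag_AddressSpace64Bit:             return { 0x08000000,   0x7FF8000000 };
            default:                                 return { 0, 0 };
        }
    }

    int main() {
        const Region r = GetMapRegion(Flag_AddressSpace64Bit);
        std::printf("map region: [%#llx, %#llx)\n",
                    (unsigned long long)r.start, (unsigned long long)(r.start + r.size));
    }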
From e3ee4cb544993dba4d0a9543a80d26bd641dfc85 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Wed, 9 Oct 2024 12:44:11 -0700
Subject: [PATCH 07/35] kern: eliminate use of KMemoryInfo, shuffle KMemoryBlock fields

---
 .../mesosphere/kern_k_memory_block.hpp        |  27 +-
 .../mesosphere/kern_k_page_table_base.hpp     |   2 +-
 .../source/kern_k_memory_block_manager.cpp    | 130 ++++----
 .../source/kern_k_page_table_base.cpp         | 296 ++++++++----------
 4 files changed, 196 insertions(+), 259 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp
index f021391e0c..2847de3375 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp
@@ -200,7 +200,8 @@ namespace ams::kern {
         KMemoryBlockDisableMergeAttribute_DeviceLeft  = (1u << 1),
         KMemoryBlockDisableMergeAttribute_IpcLeft     = (1u << 2),
         KMemoryBlockDisableMergeAttribute_Locked      = (1u << 3),
-        KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 4),
+        /* ... */
+        KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 5),

         KMemoryBlockDisableMergeAttribute_AllLeft  = KMemoryBlockDisableMergeAttribute_Normal | KMemoryBlockDisableMergeAttribute_DeviceLeft | KMemoryBlockDisableMergeAttribute_IpcLeft | KMemoryBlockDisableMergeAttribute_Locked,
         KMemoryBlockDisableMergeAttribute_AllRight = KMemoryBlockDisableMergeAttribute_DeviceRight,
@@ -288,18 +289,18 @@ namespace ams::kern {
     class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
         private:
-            u16 m_device_disable_merge_left_count;
-            u16 m_device_disable_merge_right_count;
-            KProcessAddress m_address;
-            size_t m_num_pages;
-            KMemoryState m_memory_state;
-            u16 m_ipc_lock_count;
-            u16 m_device_use_count;
-            u16 m_ipc_disable_merge_count;
             KMemoryPermission m_permission;
             KMemoryPermission m_original_permission;
             KMemoryAttribute m_attribute;
             KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
+            KProcessAddress m_address;
+            u32 m_num_pages;
+            KMemoryState m_memory_state;
+            u16 m_ipc_lock_count;
+            u16 m_ipc_disable_merge_count;
+            u16 m_device_use_count;
+            u16 m_device_disable_merge_left_count;
+            u16 m_device_disable_merge_right_count;
         public:
             static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
                 if (lhs.GetAddress() < rhs.GetAddress()) {
@@ -343,6 +344,10 @@
                 return m_ipc_disable_merge_count;
             }

+            constexpr u16 GetDeviceUseCount() const {
+                return m_device_use_count;
+            }
+
             constexpr KMemoryPermission GetPermission() const {
                 return m_permission;
             }
@@ -374,7 +379,7 @@
         public:
             explicit KMemoryBlock() { /* ... */ }

-            constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
+            constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
                 : util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(util::ConstantInitialize), m_device_disable_merge_left_count(),
                   m_device_disable_merge_right_count(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
                   m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), m_original_permission(KMemoryPermission_None),
                  m_attribute(attr), m_disable_merge_attribute()
            {
                /* ... 
*/ } - constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) { + constexpr void Initialize(KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) { MESOSPHERE_ASSERT_THIS(); m_device_disable_merge_left_count = 0; m_device_disable_merge_right_count = 0; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 99a27c6f9a..8da4311df4 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -318,7 +318,7 @@ namespace ams::kern { R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr)); } - Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const; + Result CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const; Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const; Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const; Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const { diff --git a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp index 4ba4a566a0..2c5bd962de 100644 --- a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp @@ -54,11 +54,11 @@ namespace ams::kern { return "Unknown "; } - constexpr const char *GetMemoryPermissionString(const KMemoryInfo &info) { - if (info.m_state == KMemoryState_Free) { + constexpr const char *GetMemoryPermissionString(const KMemoryBlock &block) { + if (block.GetState() == KMemoryState_Free) { return " "; } else { - switch (info.m_permission) { + switch (block.GetPermission()) { case KMemoryPermission_UserReadExecute: return "r-x"; case KMemoryPermission_UserRead: @@ -71,19 +71,19 @@ namespace ams::kern { } } - void DumpMemoryInfo(const KMemoryInfo &info) { - const char *state = GetMemoryStateName(info.m_state); - const char *perm = GetMemoryPermissionString(info); - const uintptr_t start = info.GetAddress(); - const uintptr_t end = info.GetLastAddress(); - const size_t kb = info.GetSize() / 1_KB; + void DumpMemoryBlock(const KMemoryBlock &block) { + const char *state = GetMemoryStateName(block.GetState()); + const char *perm = GetMemoryPermissionString(block); + const uintptr_t start = GetInteger(block.GetAddress()); + const uintptr_t end = GetInteger(block.GetLastAddress()); + const size_t kb = block.GetSize() / 1_KB; - const char l = (info.m_attribute & KMemoryAttribute_Locked) ? 
'L' : '-'; - const char i = (info.m_attribute & KMemoryAttribute_IpcLocked) ? 'I' : '-'; - const char d = (info.m_attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-'; - const char u = (info.m_attribute & KMemoryAttribute_Uncached) ? 'U' : '-'; + const char l = (block.GetAttribute() & KMemoryAttribute_Locked) ? 'L' : '-'; + const char i = (block.GetAttribute() & KMemoryAttribute_IpcLocked) ? 'I' : '-'; + const char d = (block.GetAttribute() & KMemoryAttribute_DeviceShared) ? 'D' : '-'; + const char u = (block.GetAttribute() & KMemoryAttribute_Uncached) ? 'U' : '-'; - MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.m_ipc_lock_count, info.m_device_use_count); + MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, block.GetIpcLockCount(), block.GetDeviceUseCount()); } } @@ -123,15 +123,14 @@ namespace ams::kern { const KProcessAddress region_end = region_start + region_num_pages * PageSize; const KProcessAddress region_last = region_end - 1; for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); it++) { - const KMemoryInfo info = it->GetMemoryInfo(); - if (region_last < info.GetAddress()) { + if (region_last < it->GetAddress()) { break; } - if (info.m_state != KMemoryState_Free) { + if (it->GetState() != KMemoryState_Free) { continue; } - KProcessAddress area = (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress(); + KProcessAddress area = (it->GetAddress() <= GetInteger(region_start)) ? region_start : it->GetAddress(); area += guard_pages * PageSize; const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset; @@ -140,7 +139,7 @@ namespace ams::kern { const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize; const KProcessAddress area_last = area_end - 1; - if (info.GetAddress() <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= info.GetLastAddress()) { + if (GetInteger(it->GetAddress()) <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= GetInteger(it->GetLastAddress())) { return area; } } @@ -171,7 +170,7 @@ namespace ams::kern { it = prev; } - if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { + if (address + num_pages * PageSize < it->GetEndAddress()) { break; } } @@ -189,43 +188,39 @@ namespace ams::kern { while (remaining_pages > 0) { const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); if (it->HasProperties(state, perm, attr)) { /* If we already have the right properties, just advance. */ - if (cur_address + remaining_size < cur_info.GetEndAddress()) { + if (cur_address + remaining_size < it->GetEndAddress()) { remaining_pages = 0; cur_address += remaining_size; } else { - remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; - cur_address = cur_info.GetEndAddress(); + remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize; + cur_address = it->GetEndAddress(); } } else { /* If we need to, create a new block before and insert it. 
*/ - if (cur_info.GetAddress() != GetInteger(cur_address)) { + if (it->GetAddress() != GetInteger(cur_address)) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); it = m_memory_block_tree.insert(*new_block); it++; - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); + cur_address = it->GetAddress(); } /* If we need to, create a new block after and insert it. */ - if (cur_info.GetSize() > remaining_size) { + if (it->GetSize() > remaining_size) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); } /* Update block state. */ it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); + cur_address += it->GetSize(); + remaining_pages -= it->GetNumPages(); } it++; } @@ -245,42 +240,38 @@ namespace ams::kern { while (remaining_pages > 0) { const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); if (it->HasProperties(test_state, test_perm, test_attr) && !it->HasProperties(state, perm, attr)) { /* If we need to, create a new block before and insert it. */ - if (cur_info.GetAddress() != GetInteger(cur_address)) { + if (it->GetAddress() != GetInteger(cur_address)) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); it = m_memory_block_tree.insert(*new_block); it++; - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); + cur_address = it->GetAddress(); } /* If we need to, create a new block after and insert it. */ - if (cur_info.GetSize() > remaining_size) { + if (it->GetSize() > remaining_size) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); } /* Update block state. */ it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); + cur_address += it->GetSize(); + remaining_pages -= it->GetNumPages(); } else { /* If we already have the right properties, just advance. */ - if (cur_address + remaining_size < cur_info.GetEndAddress()) { + if (cur_address + remaining_size < it->GetEndAddress()) { remaining_pages = 0; cur_address += remaining_size; } else { - remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; - cur_address = cur_info.GetEndAddress(); + remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize; + cur_address = it->GetEndAddress(); } } it++; @@ -302,34 +293,30 @@ namespace ams::kern { while (remaining_pages > 0) { const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); /* If we need to, create a new block before and insert it. */ - if (cur_info.m_address != GetInteger(cur_address)) { + if (it->GetAddress() != cur_address) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); it = m_memory_block_tree.insert(*new_block); it++; - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); + cur_address = it->GetAddress(); } - if (cur_info.GetSize() > remaining_size) { + if (it->GetSize() > remaining_size) { /* If we need to, create a new block after and insert it. 
*/ KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); } /* Call the locked update function. */ - (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, cur_info.GetEndAddress() == end_address); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); + (std::addressof(*it)->*lock_func)(perm, it->GetAddress() == address, it->GetEndAddress() == end_address); + cur_address += it->GetSize(); + remaining_pages -= it->GetNumPages(); it++; } @@ -347,43 +334,39 @@ namespace ams::kern { while (remaining_pages > 0) { const size_t remaining_size = remaining_pages * PageSize; - KMemoryInfo cur_info = it->GetMemoryInfo(); if ((it->GetAttribute() & mask) != attr) { /* If we need to, create a new block before and insert it. */ - if (cur_info.GetAddress() != GetInteger(cur_address)) { + if (it->GetAddress() != GetInteger(cur_address)) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); it = m_memory_block_tree.insert(*new_block); it++; - cur_info = it->GetMemoryInfo(); - cur_address = cur_info.GetAddress(); + cur_address = it->GetAddress(); } /* If we need to, create a new block after and insert it. */ - if (cur_info.GetSize() > remaining_size) { + if (it->GetSize() > remaining_size) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); it = m_memory_block_tree.insert(*new_block); - - cur_info = it->GetMemoryInfo(); } /* Update block state. */ it->UpdateAttribute(mask, attr); - cur_address += cur_info.GetSize(); - remaining_pages -= cur_info.GetNumPages(); + cur_address += it->GetSize(); + remaining_pages -= it->GetNumPages(); } else { /* If we already have the right attributes, just advance. */ - if (cur_address + remaining_size < cur_info.GetEndAddress()) { + if (cur_address + remaining_size < it->GetEndAddress()) { remaining_pages = 0; cur_address += remaining_size; } else { - remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; - cur_address = cur_info.GetEndAddress(); + remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize; + cur_address = it->GetEndAddress(); } } it++; @@ -401,8 +384,6 @@ namespace ams::kern { auto it = m_memory_block_tree.cbegin(); auto prev = it++; while (it != m_memory_block_tree.cend()) { - const KMemoryInfo prev_info = prev->GetMemoryInfo(); - const KMemoryInfo cur_info = it->GetMemoryInfo(); /* Sequential blocks which can be merged should be merged. */ if (prev->CanMergeWith(*it)) { @@ -410,17 +391,17 @@ namespace ams::kern { } /* Sequential blocks should be sequential. */ - if (prev_info.GetEndAddress() != cur_info.GetAddress()) { + if (prev->GetEndAddress() != it->GetAddress()) { return false; } /* If the block is ipc locked, it must have a count. */ - if ((cur_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && cur_info.m_ipc_lock_count == 0) { + if ((it->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && it->GetIpcLockCount() == 0) { return false; } /* If the block is device shared, it must have a count. */ - if ((cur_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && cur_info.m_device_use_count == 0) { + if ((it->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && it->GetDeviceUseCount() == 0) { return false; } @@ -430,14 +411,13 @@ namespace ams::kern { /* Our loop will miss checking the last block, potentially, so check it. 
*/ if (prev != m_memory_block_tree.cend()) { - const KMemoryInfo prev_info = prev->GetMemoryInfo(); /* If the block is ipc locked, it must have a count. */ - if ((prev_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && prev_info.m_ipc_lock_count == 0) { + if ((prev->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && prev->GetIpcLockCount() == 0) { return false; } /* If the block is device shared, it must have a count. */ - if ((prev_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && prev_info.m_device_use_count == 0) { + if ((prev->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && prev->GetDeviceUseCount() == 0) { return false; } } @@ -450,7 +430,7 @@ namespace ams::kern { void KMemoryBlockManager::DumpBlocks() const { /* Dump each block. */ for (const auto &block : m_memory_block_tree) { - DumpMemoryInfo(block.GetMemoryInfo()); + DumpMemoryBlock(block); } } } diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index 613dc0dbb9..95bc983cac 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -664,11 +664,11 @@ namespace ams::kern { } } - Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const { + Result KPageTableBase::CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const { /* Validate the states match expectation. */ - R_UNLESS((info.m_state & state_mask) == state, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.m_permission & perm_mask) == perm, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory()); + R_UNLESS((it->GetState() & state_mask) == state, svc::ResultInvalidCurrentMemory()); + R_UNLESS((it->GetPermission() & perm_mask) == perm, svc::ResultInvalidCurrentMemory()); + R_UNLESS((it->GetAttribute() & attr_mask) == attr, svc::ResultInvalidCurrentMemory()); R_SUCCEED(); } @@ -679,28 +679,26 @@ namespace ams::kern { /* Get information about the first block. */ const KProcessAddress last_addr = addr + size - 1; KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); - KMemoryInfo info = it->GetMemoryInfo(); /* If the start address isn't aligned, we need a block. */ - const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; + const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) ? 1 : 0; while (true) { /* Validate against the provided masks. */ - R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr)); /* Break once we're done. */ - if (last_addr <= info.GetLastAddress()) { + if (last_addr <= it->GetLastAddress()) { break; } /* Advance our iterator. */ it++; MESOSPHERE_ASSERT(it != m_memory_block_manager.cend()); - info = it->GetMemoryInfo(); } /* If the end address isn't aligned, we need a block. */ - const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; + const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != it->GetEndAddress()) ? 
1 : 0; if (out_blocks_needed != nullptr) { *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; @@ -712,31 +710,27 @@ namespace ams::kern { Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - /* Get information about the first block. */ - KMemoryInfo info = it->GetMemoryInfo(); - /* Validate all blocks in the range have correct state. */ - const KMemoryState first_state = info.m_state; - const KMemoryPermission first_perm = info.m_permission; - const KMemoryAttribute first_attr = info.m_attribute; + const KMemoryState first_state = it->GetState(); + const KMemoryPermission first_perm = it->GetPermission(); + const KMemoryAttribute first_attr = it->GetAttribute(); while (true) { /* Validate the current block. */ - R_UNLESS(info.m_state == first_state, svc::ResultInvalidCurrentMemory()); - R_UNLESS(info.m_permission == first_perm, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory()); + R_UNLESS(it->GetState() == first_state, svc::ResultInvalidCurrentMemory()); + R_UNLESS(it->GetPermission() == first_perm, svc::ResultInvalidCurrentMemory()); + R_UNLESS((it->GetAttribute() | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory()); /* Validate against the provided masks. */ - R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr)); /* Break once we're done. */ - if (last_addr <= info.GetLastAddress()) { + if (last_addr <= it->GetLastAddress()) { break; } /* Advance our iterator. */ it++; MESOSPHERE_ASSERT(it != m_memory_block_manager.cend()); - info = it->GetMemoryInfo(); } /* Write output state. */ @@ -752,7 +746,7 @@ namespace ams::kern { /* If the end address isn't aligned, we need a block. */ if (out_blocks_needed != nullptr) { - const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) ? 1 : 0; + const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != it->GetEndAddress()) ? 1 : 0; *out_blocks_needed = blocks_for_end_align; } @@ -1176,17 +1170,14 @@ namespace ams::kern { { KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); while (true) { - /* Get the memory info. */ - const KMemoryInfo info = it->GetMemoryInfo(); - /* Check if the memory has code flag. */ - if ((info.GetState() & KMemoryState_FlagCode) != 0) { + if ((it->GetState() & KMemoryState_FlagCode) != 0) { any_code_pages = true; break; } /* Check if we're done. 
*/ - if (dst_address + size - 1 <= info.GetLastAddress()) { + if (dst_address + size - 1 <= it->GetLastAddress()) { break; } @@ -1355,14 +1346,13 @@ namespace ams::kern { const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment; const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset; - KMemoryInfo info; - ams::svc::PageInfo page_info; - MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), candidate)); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(candidate); + MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end()); - if (info.m_state != KMemoryState_Free) { continue; } + if (it->GetState() != KMemoryState_Free) { continue; } if (!(region_start <= candidate)) { continue; } - if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; } - if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; } + if (!(it->GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; } + if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= it->GetLastAddress())) { continue; } if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; } address = candidate; @@ -1393,10 +1383,8 @@ namespace ams::kern { /* Iterate, counting blocks with the desired state. */ size_t total_size = 0; for (KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(m_address_space_start); it != m_memory_block_manager.end(); ++it) { - /* Get the memory info. */ - const KMemoryInfo info = it->GetMemoryInfo(); - if (info.GetState() == state) { - total_size += info.GetSize(); + if (it->GetState() == state) { + total_size += it->GetSize(); } } @@ -1488,17 +1476,14 @@ namespace ams::kern { /* Check that the iterator is valid. */ MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); - /* Get the memory info. */ - const KMemoryInfo info = it->GetMemoryInfo(); - /* Determine the range to map. */ - KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address)); - const KProcessAddress map_end_address = std::min(info.GetEndAddress(), GetInteger(end_address)); + KProcessAddress map_address = std::max(GetInteger(it->GetAddress()), GetInteger(start_address)); + const KProcessAddress map_end_address = std::min(GetInteger(it->GetEndAddress()), GetInteger(end_address)); MESOSPHERE_ABORT_UNLESS(map_end_address != map_address); /* Determine if we should disable head merge. */ - const bool disable_head_merge = info.GetAddress() >= GetInteger(start_address) && (info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0; - const KPageProperties map_properties = { info.GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None }; + const bool disable_head_merge = it->GetAddress() >= GetInteger(start_address) && (it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0; + const KPageProperties map_properties = { it->GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None }; /* While we have pages to map, map them. */ size_t map_pages = (map_end_address - map_address) / PageSize; @@ -1527,7 +1512,7 @@ namespace ams::kern { } /* Check if we're done. 
*/ - if (last_address <= info.GetLastAddress()) { + if (last_address <= it->GetLastAddress()) { break; } @@ -2032,19 +2017,18 @@ namespace ams::kern { address = util::AlignDown(GetInteger(address), PageSize); /* Verify that we can query the address. */ - KMemoryInfo info; - ams::svc::PageInfo page_info; - R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address)); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); + R_UNLESS(it != m_memory_block_manager.end(), svc::ResultInvalidCurrentMemory()); /* Check the memory state. */ - R_TRY(this->CheckMemoryState(info, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); + R_TRY(this->CheckMemoryState(it, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); /* Prepare to traverse. */ KPhysicalAddress phys_addr; size_t phys_size; - KProcessAddress virt_addr = info.GetAddress(); - KProcessAddress end_addr = info.GetEndAddress(); + KProcessAddress virt_addr = it->GetAddress(); + KProcessAddress end_addr = it->GetEndAddress(); /* Perform traversal. */ { @@ -3854,27 +3838,25 @@ namespace ams::kern { /* Iterate, mapping as needed. */ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); while (true) { - const KMemoryInfo info = it->GetMemoryInfo(); - /* Validate the current block. */ - R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None)); + R_TRY(this->CheckMemoryState(it, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None)); - if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < info.GetEndAddress() && info.GetAddress() < GetInteger(mapping_src_end)) { - const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) ? info.GetAddress() : GetInteger(mapping_src_start); - const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() : GetInteger(mapping_src_end); + if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < GetInteger(it->GetEndAddress()) && GetInteger(it->GetAddress()) < GetInteger(mapping_src_end)) { + const auto cur_start = it->GetAddress() >= GetInteger(mapping_src_start) ? GetInteger(it->GetAddress()) : GetInteger(mapping_src_start); + const auto cur_end = mapping_src_last >= GetInteger(it->GetLastAddress()) ? GetInteger(it->GetEndAddress()) : GetInteger(mapping_src_end); const size_t cur_size = cur_end - cur_start; - if (info.GetAddress() < GetInteger(mapping_src_start)) { + if (GetInteger(it->GetAddress()) < GetInteger(mapping_src_start)) { ++blocks_needed; } - if (mapping_src_last < info.GetLastAddress()) { + if (mapping_src_last < GetInteger(it->GetLastAddress())) { ++blocks_needed; } /* Set the permissions on the block, if we need to. */ - if ((info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) { - const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= info.GetAddress()) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None; - const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? 
DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
                     const KPageProperties properties = { src_perm, false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
                     R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
                 }
@@ -3884,7 +3866,7 @@
             }

             /* If the block is at the end, we're done. */
-            if (aligned_src_last <= info.GetLastAddress()) {
+            if (aligned_src_last <= GetInteger(it->GetLastAddress())) {
                 break;
             }
@@ -4248,56 +4230,50 @@
             const auto mapped_last = mapped_end - 1;

             /* Get current and next iterators. */
-            KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
-            KMemoryBlockManager::const_iterator next_it  = start_it;
+            KMemoryBlockManager::const_iterator cur_it  = m_memory_block_manager.FindIterator(mapping_start);
+            KMemoryBlockManager::const_iterator next_it = cur_it;
             ++next_it;

-            /* Get the current block info. */
-            KMemoryInfo cur_info = start_it->GetMemoryInfo();
-
             /* Create tracking variables. */
-            KProcessAddress cur_address = cur_info.GetAddress();
-            size_t cur_size             = cur_info.GetSize();
-            bool cur_perm_eq            = cur_info.GetPermission() == cur_info.GetOriginalPermission();
-            bool cur_needs_set_perm     = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
-            bool first                  = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
+            KProcessAddress cur_address = cur_it->GetAddress();
+            size_t cur_size             = cur_it->GetSize();
+            bool cur_perm_eq            = cur_it->GetPermission() == cur_it->GetOriginalPermission();
+            bool cur_needs_set_perm     = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
+            bool first                  = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;

             while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
                 /* Check that we have a next block. */
                 MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());

-                /* Get the next info. */
-                const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
                 /* Check if we can consolidate the next block's permission set with the current one. */
-                const bool next_perm_eq        = next_info.GetPermission() == next_info.GetOriginalPermission();
-                const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
-                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+                const bool next_perm_eq        = next_it->GetPermission() == next_it->GetOriginalPermission();
+                const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
+                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
                     /* We can consolidate the reprotection for the current and next block into a single call. */
-                    cur_size += next_info.GetSize();
+                    cur_size += next_it->GetSize();
                 } else {
                     /* We have to operate on the current block. */
                     if ((cur_needs_set_perm || first) && !cur_perm_eq) {
-                        const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
+                        const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
                         MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
                     }

                     /* Advance. */
-                    cur_address = next_info.GetAddress();
-                    cur_size    = next_info.GetSize();
+                    cur_address = next_it->GetAddress();
+                    cur_size    = next_it->GetSize();
                     first       = false;
                 }

                 /* Advance. */
-                cur_info           = next_info;
                 cur_perm_eq        = next_perm_eq;
                 cur_needs_set_perm = next_needs_set_perm;
-                ++next_it;
+
+                cur_it = next_it++;
             }

             /* Process the last block. */
             if ((first || cur_needs_set_perm) && !cur_perm_eq) {
-                const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
+                const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
                 MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
             }
         }
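The CleanupForIpcServer loop above batches adjacent blocks whose reprotection is identical into a single Operate call. A self-contained model of that coalescing idea (Reprotect stands in for this->Operate; everything here is illustrative, not kernel code):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Block { size_t address, size; bool needs_set_perm; };

    void Reprotect(size_t address, size_t size) {
        std::printf("reprotect [%zx, %zx)\n", address, address + size);
    }

    void CoalesceAndReprotect(const std::vector<Block> &blocks) {
        size_t run_address = 0, run_size = 0;
        bool in_run = false;
        for (const auto &b : blocks) {
            if (b.needs_set_perm) {
                if (!in_run) { run_address = b.address; in_run = true; }
                run_size += b.size;                /* extend the current run */
            } else if (in_run) {
                Reprotect(run_address, run_size);  /* flush the finished run */
                run_size = 0;
                in_run   = false;
            }
        }
        if (in_run) { Reprotect(run_address, run_size); } /* process the last run */
    }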
@@ -4306,41 +4282,37 @@
         /* Iterate, reprotecting as needed. */
         {
             /* Get current and next iterators. */
-            KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
-            KMemoryBlockManager::const_iterator next_it  = start_it;
+            KMemoryBlockManager::const_iterator cur_it  = m_memory_block_manager.FindIterator(mapping_start);
+            KMemoryBlockManager::const_iterator next_it = cur_it;
             ++next_it;

             /* Validate the current block. */
-            KMemoryInfo cur_info = start_it->GetMemoryInfo();
-            MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
+            MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));

             /* Create tracking variables. */
-            KProcessAddress cur_address = cur_info.GetAddress();
-            size_t cur_size             = cur_info.GetSize();
-            bool cur_perm_eq            = cur_info.GetPermission() == cur_info.GetOriginalPermission();
-            bool cur_needs_set_perm     = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
-            bool first                  = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
+            KProcessAddress cur_address = cur_it->GetAddress();
+            size_t cur_size             = cur_it->GetSize();
+            bool cur_perm_eq            = cur_it->GetPermission() == cur_it->GetOriginalPermission();
+            bool cur_needs_set_perm     = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
+            bool first                  = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;

             while ((cur_address + cur_size - 1) < mapping_last) {
                 /* Check that we have a next block. */
                 MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());

-                /* Get the next info. */
-                const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
                 /* Validate the next block. */
-                MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
+                MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));

                 /* Check if we can consolidate the next block's permission set with the current one. */
-                const bool next_perm_eq        = next_info.GetPermission() == next_info.GetOriginalPermission();
-                const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
-                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+                const bool next_perm_eq        = next_it->GetPermission() == next_it->GetOriginalPermission();
+                const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
+                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
                     /* We can consolidate the reprotection for the current and next block into a single call. */
-                    cur_size += next_info.GetSize();
+                    cur_size += next_it->GetSize();
                 } else {
                     /* We have to operate on the current block. */
                     if ((cur_needs_set_perm || first) && !cur_perm_eq) {
-                        const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
+                        const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
                         R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
                     }

                     /* Mark that we mapped this area. */
                     mapped_size += cur_size;

                     /* Advance. */
-                    cur_address = next_info.GetAddress();
-                    cur_size    = next_info.GetSize();
+                    cur_address = next_it->GetAddress();
+                    cur_size    = next_it->GetSize();
                     first       = false;
                 }

                 /* Advance. */
-                cur_info           = next_info;
                 cur_perm_eq        = next_perm_eq;
                 cur_needs_set_perm = next_needs_set_perm;
-                ++next_it;
+
+                cur_it = next_it++;
             }

             /* Process the last block. */
-            const auto lock_count = cur_info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
+            const auto lock_count = cur_it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
             if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
                 const DisableMergeAttribute head_body_attr = first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
                 const DisableMergeAttribute tail_attr      = lock_count == 1 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
-                const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
+                const KPageProperties properties = { cur_needs_set_perm ? 
cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
                 R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
             }
         }
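Both hunks around this point manage head/tail "disable merge" attributes so that, once an IPC lock is released, blocks may coalesce with their neighbors again. A reduced model of the merge check those attributes feed; names mirror the patch, but the logic is simplified for illustration:

    #include <cstdint>

    enum DisableMergeAttribute : uint8_t {
        DisableMergeAttribute_None        = 0,
        DisableMergeAttribute_DisableHead = 1u << 0,
        DisableMergeAttribute_DisableTail = 1u << 1,
    };

    struct BlockModel {
        uint64_t state, perm, attr;
        uint8_t  disable_merge;
    };

    /* Can block 'l' absorb its right neighbor 'r'? Properties must match, and   */
    /* neither side may forbid the merge at this shared edge.                    */
    bool CanMergeRight(const BlockModel &l, const BlockModel &r) {
        const bool same = l.state == r.state && l.perm == r.perm && l.attr == r.attr;
        return same && (l.disable_merge & DisableMergeAttribute_DisableTail) == 0
                    && (r.disable_merge & DisableMergeAttribute_DisableHead) == 0;
    }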
@@ -4398,38 +4370,36 @@
             /* Iterate over blocks, fixing permissions. */
             KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
             while (true) {
-                const KMemoryInfo info = it->GetMemoryInfo();
-
-                const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) ? info.GetAddress() : GetInteger(src_map_start);
-                const auto cur_end   = src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
+                const auto cur_start = it->GetAddress() >= GetInteger(src_map_start) ? it->GetAddress() : GetInteger(src_map_start);
+                const auto cur_end   = src_map_last <= it->GetLastAddress() ? src_map_end : it->GetEndAddress();

                 /* If we can, fix the protections on the block. */
-                if ((info.GetIpcLockCount() == 0 && (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
-                    (info.GetIpcLockCount() != 0 && (info.GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
+                if ((it->GetIpcLockCount() == 0 && (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
+                    (it->GetIpcLockCount() != 0 && (it->GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
                 {
                     /* Check if we actually need to fix the protections on the block. */
-                    if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
-                        const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) ? ((info.GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : info.GetAddress() <= GetInteger(src_map_start);
+                    if (cur_end == src_map_end || it->GetAddress() <= GetInteger(src_map_start) || (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
+                        const bool start_nc = (it->GetAddress() == GetInteger(src_map_start)) ? ((it->GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : it->GetAddress() <= GetInteger(src_map_start);

                         const DisableMergeAttribute head_body_attr = start_nc ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
                         DisableMergeAttribute tail_attr;
-                        if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
+                        if (cur_end == src_map_end && it->GetEndAddress() == src_map_end) {
                             auto next_it = it;
                             ++next_it;

-                            const auto lock_count = info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
+                            const auto lock_count = it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
                             tail_attr = lock_count == 0 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
                         } else {
                             tail_attr = DisableMergeAttribute_None;
                         }

-                        const KPageProperties properties = { info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
+                        const KPageProperties properties = { it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
                         MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
                     }
                 }

                 /* If we're past the end of the region, we're done. */
-                if (src_map_last <= info.GetLastAddress()) {
+                if (src_map_last <= it->GetLastAddress()) {
                     break;
                 }
@@ -4468,24 +4438,21 @@
             /* Check that the iterator is valid. */
             MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

-            /* Get the memory info. */
-            const KMemoryInfo info = it->GetMemoryInfo();
-
             /* Check if we're done. */
-            if (last_address <= info.GetLastAddress()) {
-                if (info.GetState() != KMemoryState_Free) {
+            if (last_address <= it->GetLastAddress()) {
+                if (it->GetState() != KMemoryState_Free) {
                     mapped_size += (last_address + 1 - cur_address);
                 }
                 break;
             }

             /* Track the memory if it's mapped. */
-            if (info.GetState() != KMemoryState_Free) {
-                mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+            if (it->GetState() != KMemoryState_Free) {
+                mapped_size += it->GetEndAddress() - cur_address;
             }

             /* Advance. */
-            cur_address = info.GetEndAddress();
+            cur_address = it->GetEndAddress();
             ++it;
         }
@@ -4527,21 +4494,18 @@
             /* Check that the iterator is valid. */
             MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

-            /* Get the memory info. */
-            const KMemoryInfo info = it->GetMemoryInfo();
-
-            const bool is_free = info.GetState() == KMemoryState_Free;
+            const bool is_free = it->GetState() == KMemoryState_Free;
             if (is_free) {
-                if (info.GetAddress() < GetInteger(address)) {
+                if (it->GetAddress() < GetInteger(address)) {
                     ++num_allocator_blocks;
                 }
-                if (last_address < info.GetLastAddress()) {
+                if (last_address < it->GetLastAddress()) {
                     ++num_allocator_blocks;
                 }
             }

             /* Check if we're done. */
-            if (last_address <= info.GetLastAddress()) {
+            if (last_address <= it->GetLastAddress()) {
                 if (!is_free) {
                     checked_mapped_size += (last_address + 1 - cur_address);
                 }
                 break;
             }

             /* Track the memory if it's mapped. */
             if (!is_free) {
-                checked_mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+                checked_mapped_size += it->GetEndAddress() - cur_address;
             }

             /* Advance. */
-            cur_address = info.GetEndAddress();
+            cur_address = it->GetEndAddress();
             ++it;
         }
@@ -4594,26 +4558,23 @@
             /* Check that the iterator is valid. */
             MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

-            /* Get the memory info. */
-            const KMemoryInfo info = it->GetMemoryInfo();
-
             /* If the memory state is free, we mapped it and need to unmap it. */
-            if (info.GetState() == KMemoryState_Free) {
+            if (it->GetState() == KMemoryState_Free) {
                 /* Determine the range to unmap. */
                 const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
-                const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_unmap_address + 1 - cur_address) / PageSize;
+                const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_unmap_address + 1 - cur_address) / PageSize;

                 /* Unmap. */
*/ MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null, false, unmap_properties, OperationType_Unmap, true)); } /* Check if we're done. */ - if (last_unmap_address <= info.GetLastAddress()) { + if (last_unmap_address <= it->GetLastAddress()) { break; } /* Advance. */ - cur_address = info.GetEndAddress(); + cur_address = it->GetEndAddress(); ++it; } } @@ -4632,14 +4593,11 @@ namespace ams::kern { /* Check that the iterator is valid. */ MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); - /* Get the memory info. */ - const KMemoryInfo info = it->GetMemoryInfo(); - /* If it's unmapped, we need to map it. */ - if (info.GetState() == KMemoryState_Free) { + if (it->GetState() == KMemoryState_Free) { /* Determine the range to map. */ const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, cur_address == this->GetAliasRegionStart() ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None }; - size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize; + size_t map_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize; /* While we have pages to map, map them. */ { @@ -4680,12 +4638,12 @@ namespace ams::kern { } /* Check if we're done. */ - if (last_address <= info.GetLastAddress()) { + if (last_address <= it->GetLastAddress()) { break; } /* Advance. */ - cur_address = info.GetEndAddress(); + cur_address = it->GetEndAddress(); ++it; } @@ -4737,26 +4695,23 @@ namespace ams::kern { /* Check that the iterator is valid. */ MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); - /* Get the memory info. */ - const KMemoryInfo info = it->GetMemoryInfo(); - /* Verify the memory's state. */ - const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0; - const bool is_free = info.GetState() == KMemoryState_Free; + const bool is_normal = it->GetState() == KMemoryState_Normal && it->GetAttribute() == 0; + const bool is_free = it->GetState() == KMemoryState_Free; R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory()); if (is_normal) { - R_UNLESS(info.GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory()); + R_UNLESS(it->GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory()); if (map_start_address == Null) { map_start_address = cur_address; } - map_last_address = (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; + map_last_address = (last_address >= it->GetLastAddress()) ? it->GetLastAddress() : last_address; - if (info.GetAddress() < GetInteger(address)) { + if (it->GetAddress() < GetInteger(address)) { ++num_allocator_blocks; } - if (last_address < info.GetLastAddress()) { + if (last_address < it->GetLastAddress()) { ++num_allocator_blocks; } @@ -4764,12 +4719,12 @@ namespace ams::kern { } /* Check if we're done. */ - if (last_address <= info.GetLastAddress()) { + if (last_address <= it->GetLastAddress()) { break; } /* Advance. */ - cur_address = info.GetEndAddress(); + cur_address = it->GetEndAddress(); ++it; } @@ -4802,26 +4757,23 @@ namespace ams::kern { /* Check that the iterator is valid. */ MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); - /* Get the memory info. */ - const KMemoryInfo info = it->GetMemoryInfo(); - /* If the memory state is normal, we need to unmap it. */ - if (info.GetState() == KMemoryState_Normal) { + if (it->GetState() == KMemoryState_Normal) { /* Determine the range to unmap. 
*/ const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None }; - const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize; + const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize; /* Unmap. */ MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null, false, unmap_properties, OperationType_Unmap, false)); } /* Check if we're done. */ - if (last_address <= info.GetLastAddress()) { + if (last_address <= it->GetLastAddress()) { break; } /* Advance. */ - cur_address = info.GetEndAddress(); + cur_address = it->GetEndAddress(); ++it; } From 3d178950e86769fca40204b663334bd9723f598e Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 12:51:49 -0700 Subject: [PATCH 08/35] kern: fix KMemoryBlock ctor reorder warn --- .../include/mesosphere/kern_k_memory_block.hpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp index 2847de3375..57876fbcbe 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp @@ -380,10 +380,9 @@ namespace ams::kern { explicit KMemoryBlock() { /* ... */ } constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) - : util::IntrusiveRedBlackTreeBaseNode(util::ConstantInitialize), m_device_disable_merge_left_count(), - m_device_disable_merge_right_count(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), - m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), m_original_permission(KMemoryPermission_None), - m_attribute(attr), m_disable_merge_attribute() + : util::IntrusiveRedBlackTreeBaseNode(util::ConstantInitialize), m_permission(p), m_original_permission(KMemoryPermission_None), + m_attribute(attr), m_disable_merge_attribute(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), + m_ipc_disable_merge_count(), m_device_use_count(0), m_device_disable_merge_left_count(), m_device_disable_merge_right_count() { /* ... */ } From c6b26921683505802e6dbce7c78d715853680286 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 12:52:07 -0700 Subject: [PATCH 09/35] kern: clear gicd/gicc pointers in KInterruptController::Finalize --- .../source/arch/arm/kern_generic_interrupt_controller.inc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc b/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc index 864da44a5d..ef363c0cf0 100644 --- a/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc +++ b/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc @@ -79,6 +79,12 @@ namespace ams::kern::arch::arm { /* Setup all interrupt lines. */ SetupInterruptLines(core_id); + + /* Clear pointers, if needed. 
*/ + if (core_id == 0) { + m_gicd = nullptr; + m_gicc = nullptr; + } } void KInterruptController::SaveCoreLocal(LocalState *state) const { From c8e73003f30d703796bc4ad265d7fad607bf7215 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 14:04:15 -0700 Subject: [PATCH 10/35] kern: allocate all TTBR0 pages during init, use procidx as asid --- .../arch/arm64/kern_k_page_table.hpp | 25 ++++- .../arch/arm64/kern_k_process_page_table.hpp | 8 +- .../arm64/kern_k_supervisor_page_table.hpp | 3 +- .../include/mesosphere/kern_k_process.hpp | 2 +- .../source/arch/arm64/kern_k_page_table.cpp | 103 ++---------------- .../nintendo/nx/kern_k_sleep_manager.cpp | 4 +- .../libmesosphere/source/kern_k_process.cpp | 4 +- mesosphere/kernel/kernel.ld | 2 + .../source/arch/arm64/init/kern_init_core.cpp | 31 ++++++ 9 files changed, 72 insertions(+), 110 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index 14aba9913e..370211b2bd 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -93,9 +93,13 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ASSERT(alignment < L1BlockSize); return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1)); } + public: + /* TODO: How should this size be determined? Does the KProcess slab count need to go in a header as a define? */ + static constexpr size_t NumTtbr0Entries = 81; + private: + static constinit inline const volatile u64 s_ttbr0_entries[NumTtbr0Entries] = {}; private: KPageTableManager *m_manager; - u64 m_ttbr; u8 m_asid; protected: Result OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll); @@ -168,17 +172,28 @@ namespace ams::kern::arch::arm64 { return entry; } public: - constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_ttbr(), m_asid() { /* ... */ } + constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_asid() { /* ... */ } explicit KPageTable() { /* ... 
*/ } static NOINLINE void Initialize(s32 core_id); - ALWAYS_INLINE void Activate(u32 proc_id) { - cpu::SwitchProcess(m_ttbr, proc_id); + static const volatile u64 &GetTtbr0Entry(size_t index) { return s_ttbr0_entries[index]; } + + static ALWAYS_INLINE u64 GetKernelTtbr0() { + return s_ttbr0_entries[0]; + } + + static ALWAYS_INLINE void ActivateKernel() { + /* Activate, using asid 0 and process id = 0xFFFFFFFF */ + cpu::SwitchProcess(GetKernelTtbr0(), 0xFFFFFFFF); + } + + static ALWAYS_INLINE void ActivateProcess(size_t proc_idx, u32 proc_id) { + cpu::SwitchProcess(s_ttbr0_entries[proc_idx + 1], proc_id); } NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end); - NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit); + NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index); Result Finalize(); private: Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp index 640b9e5e59..3ad3c96a1c 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp @@ -23,13 +23,13 @@ namespace ams::kern::arch::arm64 { private: KPageTable m_page_table; public: - void Activate(u64 id) { + void Activate(size_t process_index, u64 id) { /* Activate the page table with the specified contextidr. 
*/ - m_page_table.Activate(id); + m_page_table.ActivateProcess(process_index, id); } - Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) { - R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit)); + Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) { + R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit, process_index)); } void Finalize() { m_page_table.Finalize(); } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp index 73d886d845..0c0602289f 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp @@ -29,8 +29,7 @@ namespace ams::kern::arch::arm64 { NOINLINE void Initialize(s32 core_id); void Activate() { - /* Activate, using process id = 0xFFFFFFFF */ - m_page_table.Activate(0xFFFFFFFF); + m_page_table.ActivateKernel(); } void ActivateForInit() { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index 5a37e3a6b6..c4ab7f2a58 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -374,7 +374,7 @@ namespace ams::kern { /* Update the current page table. 
*/ if (next_process) { - next_process->GetPageTable().Activate(next_process->GetProcessId()); + next_process->GetPageTable().Activate(next_process->GetSlabIndex(), next_process->GetProcessId()); } else { Kernel::GetKernelPageTable().Activate(); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index 55e4fc52b6..c1b0c76b4d 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -85,77 +85,6 @@ namespace ams::kern::arch::arm64 { return (static_cast(asid) << 48) | (static_cast(GetInteger(table))); } - class KPageTableAsidManager { - private: - using WordType = u32; - static constexpr u8 ReservedAsids[] = { 0 }; - static constexpr size_t NumReservedAsids = util::size(ReservedAsids); - static constexpr size_t BitsPerWord = BITSIZEOF(WordType); - static constexpr size_t AsidCount = 0x100; - static constexpr size_t NumWords = AsidCount / BitsPerWord; - static constexpr WordType FullWord = ~WordType(0u); - private: - WordType m_state[NumWords]; - KLightLock m_lock; - u8 m_hint; - private: - constexpr bool TestImpl(u8 asid) const { - return m_state[asid / BitsPerWord] & (1u << (asid % BitsPerWord)); - } - constexpr void ReserveImpl(u8 asid) { - MESOSPHERE_ASSERT(!this->TestImpl(asid)); - m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); - } - - constexpr void ReleaseImpl(u8 asid) { - MESOSPHERE_ASSERT(this->TestImpl(asid)); - m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); - } - - constexpr u8 FindAvailable() const { - for (size_t i = 0; i < util::size(m_state); i++) { - if (m_state[i] == FullWord) { - continue; - } - const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]); - return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit); - } - if (m_state[util::size(m_state)-1] == FullWord) { - MESOSPHERE_PANIC("Unable to reserve ASID"); - } - __builtin_unreachable(); - } - - static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) { - return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType)); - } - public: - constexpr KPageTableAsidManager() : m_state(), m_lock(), m_hint() { - for (size_t i = 0; i < NumReservedAsids; i++) { - this->ReserveImpl(ReservedAsids[i]); - } - } - - u8 Reserve() { - KScopedLightLock lk(m_lock); - - if (this->TestImpl(m_hint)) { - m_hint = this->FindAvailable(); - } - - this->ReserveImpl(m_hint); - - return m_hint++; - } - - void Release(u8 asid) { - KScopedLightLock lk(m_lock); - this->ReleaseImpl(asid); - } - }; - - KPageTableAsidManager g_asid_manager; - } ALWAYS_INLINE void KPageTable::NoteUpdated() const { @@ -184,6 +113,7 @@ namespace ams::kern::arch::arm64 { this->OnKernelTableSinglePageUpdated(virt_addr); } + void KPageTable::Initialize(s32 core_id) { /* Nothing actually needed here. */ MESOSPHERE_UNUSED(core_id); @@ -194,38 +124,29 @@ namespace ams::kern::arch::arm64 { m_asid = 0; m_manager = Kernel::GetSystemSystemResource().GetPageTableManagerPointer(); - /* Allocate a page for ttbr. */ - /* NOTE: It is a postcondition of page table manager allocation that the page is all-zero. */ - const u64 asid_tag = (static_cast(m_asid) << 48ul); - const KVirtualAddress page = m_manager->Allocate(); - MESOSPHERE_ASSERT(page != Null); - m_ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag; - /* Initialize the base page table. 
*/ MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end)); R_SUCCEED(); } - Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) { - /* Get an ASID */ - m_asid = g_asid_manager.Reserve(); - ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); }; + Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) { + /* Determine our ASID */ + m_asid = process_index + 1; + MESOSPHERE_ABORT_UNLESS(0 < m_asid && m_asid < util::size(s_ttbr0_entries)); /* Set our manager. */ m_manager = system_resource->GetPageTableManagerPointer(); - /* Allocate a new table, and set our ttbr value. */ - const KVirtualAddress new_table = m_manager->Allocate(); - R_UNLESS(new_table != Null, svc::ResultOutOfResource()); - m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid); - ON_RESULT_FAILURE_2 { m_manager->Free(new_table); }; + /* Get the virtual address of our L1 table. */ + const KPhysicalAddress ttbr0_phys = KPhysicalAddress(s_ttbr0_entries[m_asid] & UINT64_C(0xFFFFFFFFFFFE)); + const KVirtualAddress ttbr0_virt = KMemoryLayout::GetLinearVirtualAddress(ttbr0_phys); /* Initialize our base table. */ const size_t as_width = GetAddressSpaceWidth(flags); const KProcessAddress as_start = 0; const KProcessAddress as_end = (1ul << as_width); - R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit)); + R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(ttbr0_virt), as_start, as_end, code_address, code_size, system_resource, resource_limit)); /* Note that we've updated the table (since we created it). */ this->NoteUpdated(); @@ -329,20 +250,16 @@ namespace ams::kern::arch::arm64 { } } - /* Free the L1 table. */ + /* Clear the L1 table. */ { const KVirtualAddress l1_table = reinterpret_cast(impl.Finalize()); ClearPageTable(l1_table); - this->GetPageTableManager().Free(l1_table); } /* Perform inherited finalization. */ KPageTableBase::Finalize(); } - /* Release our asid. */ - g_asid_manager.Release(m_asid); - R_SUCCEED(); } diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp index bb782d3907..5e5983c862 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp @@ -31,7 +31,6 @@ namespace ams::kern::board::nintendo::nx { /* Struct representing registers saved on wake/sleep. */ class SavedSystemRegisters { private: - u64 ttbr0_el1; u64 elr_el1; u64 sp_el0; u64 spsr_el1; @@ -90,7 +89,6 @@ namespace ams::kern::board::nintendo::nx { void SavedSystemRegisters::Save() { /* Save system registers. */ - this->ttbr0_el1 = cpu::GetTtbr0El1(); this->tpidr_el0 = cpu::GetTpidrEl0(); this->elr_el1 = cpu::GetElrEl1(); this->sp_el0 = cpu::GetSpEl0(); @@ -405,7 +403,7 @@ namespace ams::kern::board::nintendo::nx { cpu::EnsureInstructionConsistency(); /* Restore system registers. 
*/ - cpu::SetTtbr0El1 (this->ttbr0_el1); + cpu::SetTtbr0El1 (KPageTable::GetKernelTtbr0()); cpu::SetTpidrEl0 (this->tpidr_el0); cpu::SetElrEl1 (this->elr_el1); cpu::SetSpEl0 (this->sp_el0); diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp index 789bdbc09b..96d82f975b 100644 --- a/libraries/libmesosphere/source/kern_k_process.cpp +++ b/libraries/libmesosphere/source/kern_k_process.cpp @@ -299,7 +299,7 @@ namespace ams::kern { /* Setup page table. */ { const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0; - R_TRY(m_page_table.Initialize(static_cast(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit)); + R_TRY(m_page_table.Initialize(static_cast(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetSlabIndex())); } ON_RESULT_FAILURE_2 { m_page_table.Finalize(); }; @@ -378,7 +378,7 @@ namespace ams::kern { /* Setup page table. */ { const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0; - R_TRY(m_page_table.Initialize(static_cast(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit)); + R_TRY(m_page_table.Initialize(static_cast(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit, this->GetSlabIndex())); } ON_RESULT_FAILURE_2 { m_page_table.Finalize(); }; diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld index 779488a356..bcd658572e 100644 --- a/mesosphere/kernel/kernel.ld +++ b/mesosphere/kernel/kernel.ld @@ -125,6 +125,8 @@ SECTIONS .gnu.version_r : { *(.gnu.version_r) } :rodata .note.gnu.build-id : { *(.note.gnu.build-id) } :rodata + __rodata_end = .; + /* =========== DATA section =========== */ . = ALIGN(0x1000); __data_start = . ; diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index faf34576dc..10a000c6d8 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -15,6 +15,9 @@ */ #include +extern "C" void __rodata_start(); +extern "C" void __rodata_end(); + extern "C" void __bin_start__(); extern "C" void __bin_end__(); @@ -220,6 +223,31 @@ namespace ams::kern::init { }; static_assert(kern::arch::arm64::init::IsInitialPageAllocator); + void SetupAllTtbr0Entries(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) { + /* Validate that the ttbr0 array is in rodata. */ + const uintptr_t rodata_start = reinterpret_cast(__rodata_start); + const uintptr_t rodata_end = reinterpret_cast(__rodata_end); + MESOSPHERE_INIT_ABORT_UNLESS(rodata_start < rodata_end); + MESOSPHERE_INIT_ABORT_UNLESS(rodata_start <= reinterpret_cast(std::addressof(KPageTable::GetTtbr0Entry(0)))); + MESOSPHERE_INIT_ABORT_UNLESS(reinterpret_cast(std::addressof(KPageTable::GetTtbr0Entry(KPageTable::NumTtbr0Entries))) < rodata_end); + + /* Allocate pages for all ttbr0 entries. */ + for (size_t i = 0; i < KPageTable::NumTtbr0Entries; ++i) { + /* Allocate a page. */ + KPhysicalAddress page = allocator.Allocate(PageSize); + MESOSPHERE_INIT_ABORT_UNLESS(page != Null); + + /* Check that the page is allowed to be a ttbr0 entry. */ + MESOSPHERE_INIT_ABORT_UNLESS((GetInteger(page) & UINT64_C(0xFFFF000000000001)) == 0); + + /* Get the physical address of the ttbr0 entry. 
*/ + const auto ttbr0_phys_ptr = init_pt.GetPhysicalAddress(KVirtualAddress(std::addressof(KPageTable::GetTtbr0Entry(i)))); + + /* Set the entry to the newly allocated page. */ + *reinterpret_cast(GetInteger(ttbr0_phys_ptr)) = (static_cast(i) << 48) | GetInteger(page); + } + } + void FinalizeIdentityMapping(KInitialPageTable &init_pt, KInitialPageAllocator &allocator, u64 phys_to_virt_offset) { /* Create an allocator for identity mapping finalization. */ KInitialPageAllocatorForFinalizeIdentityMapping finalize_allocator(allocator, phys_to_virt_offset); @@ -591,6 +619,9 @@ namespace ams::kern::init { /* Create page table object for use during remaining initialization. */ KInitialPageTable init_pt; + /* Setup all ttbr0 pages. */ + SetupAllTtbr0Entries(init_pt, g_initial_page_allocator); + /* Unmap the identity mapping. */ FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff); From 117da7ff37e9b4ca7d4326f4d67a15afb6069850 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 15:12:25 -0700 Subject: [PATCH 11/35] kern: fix debug build --- .../source/arch/arm64/kern_k_debug.cpp | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp index 89587597b6..87d7bf307f 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp @@ -15,6 +15,28 @@ */ #include +/* */ +namespace ams::rocrt { + + constexpr inline const u32 ModuleHeaderVersion = util::FourCC<'M','O','D','0'>::Code; + + struct ModuleHeader { + u32 signature; + u32 dynamic_offset; + u32 bss_start_offset; + u32 bss_end_offset; + u32 exception_info_start_offset; + u32 exception_info_end_offset; + u32 module_offset; + }; + + struct ModuleHeaderLocation { + u32 pad; + u32 header_offset; + }; + +} + namespace ams::kern::arch::arm64 { namespace { @@ -657,7 +679,7 @@ namespace ams::kern::arch::arm64 { return PrintAddressWithModuleName(address, has_module_name, module_name, base_address); } - dyn_address = module.start_address + mod_offset + temp_32; + dyn_address = base_address + mod_offset + temp_32; } /* Locate tables inside .dyn. */ From e200dfb48c138e2346f31a0656b92ac7097dafc2 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 15:12:46 -0700 Subject: [PATCH 12/35] kern: move KTargetSystem into .rodata, split init/verify --- .../nintendo/nx/kern_k_system_control.hpp | 1 + .../mesosphere/kern_k_system_control_base.hpp | 1 + .../mesosphere/kern_k_target_system.hpp | 49 +++++++++-------- .../nintendo/nx/kern_k_system_control.cpp | 53 +++++++++++++++---- .../source/kern_k_system_control_base.cpp | 29 +++------- .../source/kern_kernel_instantiations.cpp | 4 ++ 6 files changed, 83 insertions(+), 54 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp index a0cfce5a1e..8e18298490 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp @@ -45,6 +45,7 @@ namespace ams::kern::board::nintendo::nx { }; public: /* Initialization. 
*/ + static NOINLINE void ConfigureKTargetSystem(); static NOINLINE void InitializePhase1(); static NOINLINE void InitializePhase2(); static NOINLINE u32 GetCreateProcessMemoryPool(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp index bb1c1ff0fc..ab3a18c5bb 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp @@ -69,6 +69,7 @@ namespace ams::kern { static NOINLINE void InitializePhase1Base(u64 seed); public: /* Initialization. */ + static NOINLINE void ConfigureKTargetSystem(); static NOINLINE void InitializePhase1(); static NOINLINE void InitializePhase2(); static NOINLINE u32 GetCreateProcessMemoryPool(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp index 87b72c0fa0..24220bed04 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp @@ -24,29 +24,36 @@ namespace ams::kern { friend class KSystemControlBase; friend class KSystemControl; private: - static inline constinit bool s_is_debug_mode; - static inline constinit bool s_enable_debug_logging; - static inline constinit bool s_enable_user_exception_handlers; - static inline constinit bool s_enable_debug_memory_fill; - static inline constinit bool s_enable_user_pmu_access; - static inline constinit bool s_enable_kernel_debugging; - static inline constinit bool s_enable_dynamic_resource_limits; + struct KTargetSystemData { + bool is_debug_mode; + bool enable_debug_logging; + bool enable_user_exception_handlers; + bool enable_debug_memory_fill; + bool enable_user_pmu_access; + bool enable_kernel_debugging; + bool enable_dynamic_resource_limits; + }; private: - static ALWAYS_INLINE void SetIsDebugMode(bool en) { s_is_debug_mode = en; } - static ALWAYS_INLINE void EnableDebugLogging(bool en) { s_enable_debug_logging = en; } - static ALWAYS_INLINE void EnableUserExceptionHandlers(bool en) { s_enable_user_exception_handlers = en; } - static ALWAYS_INLINE void EnableDebugMemoryFill(bool en) { s_enable_debug_memory_fill = en; } - static ALWAYS_INLINE void EnableUserPmuAccess(bool en) { s_enable_user_pmu_access = en; } - static ALWAYS_INLINE void EnableKernelDebugging(bool en) { s_enable_kernel_debugging = en; } - static ALWAYS_INLINE void EnableDynamicResourceLimits(bool en) { s_enable_dynamic_resource_limits = en; } + static inline constinit bool s_is_initialized = false; + static inline constinit const volatile KTargetSystemData s_data = { + .is_debug_mode = true, + .enable_debug_logging = true, + .enable_user_exception_handlers = true, + .enable_debug_memory_fill = true, + .enable_user_pmu_access = true, + .enable_kernel_debugging = true, + .enable_dynamic_resource_limits = false, + }; + private: + static ALWAYS_INLINE void SetInitialized() { s_is_initialized = true; } public: - static ALWAYS_INLINE bool IsDebugMode() { return s_is_debug_mode; } - static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_enable_debug_logging; } - static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled() { return s_enable_user_exception_handlers; } - static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_enable_debug_memory_fill; } - static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_enable_user_pmu_access; } 
- static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_enable_kernel_debugging; } - static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled() { return s_enable_dynamic_resource_limits; } + static ALWAYS_INLINE bool IsDebugMode() { return s_is_initialized && s_data.is_debug_mode; } + static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_is_initialized && s_data.enable_debug_logging; } + static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled() { return s_is_initialized && s_data.enable_user_exception_handlers; } + static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_is_initialized && s_data.enable_debug_memory_fill; } + static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_is_initialized && s_data.enable_user_pmu_access; } + static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_is_initialized && s_data.enable_kernel_debugging; } + static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled() { return s_is_initialized && s_data.enable_dynamic_resource_limits; } }; } \ No newline at end of file diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp index 52a894d1ca..8050f78e0a 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -26,7 +26,7 @@ namespace ams::kern::board::nintendo::nx { constexpr size_t SecureSizeMax = util::AlignDown(512_MB - 1, SecureAlignment); /* Global variables for panic. */ - constinit bool g_call_smc_on_panic; + constinit const volatile bool g_call_smc_on_panic = false; /* Global variables for secure memory. */ constinit KSpinLock g_secure_applet_lock; @@ -401,34 +401,67 @@ namespace ams::kern::board::nintendo::nx { } /* System Initialization. */ - void KSystemControl::InitializePhase1() { + void KSystemControl::ConfigureKTargetSystem() { /* Configure KTargetSystem. */ + volatile auto *ts = const_cast(std::addressof(KTargetSystem::s_data)); { /* Set IsDebugMode. */ { - KTargetSystem::SetIsDebugMode(GetConfigBool(smc::ConfigItem::IsDebugMode)); + ts->is_debug_mode = GetConfigBool(smc::ConfigItem::IsDebugMode); /* If debug mode, we want to initialize uart logging. */ - KTargetSystem::EnableDebugLogging(KTargetSystem::IsDebugMode()); + ts->enable_debug_logging = ts->is_debug_mode; } /* Set Kernel Configuration. */ { const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)}; - KTargetSystem::EnableDebugMemoryFill(kernel_config.Get()); - KTargetSystem::EnableUserExceptionHandlers(kernel_config.Get()); - KTargetSystem::EnableDynamicResourceLimits(!kernel_config.Get()); - KTargetSystem::EnableUserPmuAccess(kernel_config.Get()); + ts->enable_debug_memory_fill = kernel_config.Get(); + ts->enable_user_exception_handlers = kernel_config.Get(); + ts->enable_dynamic_resource_limits = !kernel_config.Get(); + ts->enable_user_pmu_access = kernel_config.Get(); - g_call_smc_on_panic = kernel_config.Get(); + /* Configure call smc on panic. */ + *const_cast(std::addressof(g_call_smc_on_panic)) = kernel_config.Get(); } /* Set Kernel Debugging. */ { /* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */ /* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. 
*/ - KTargetSystem::EnableKernelDebugging(GetConfigBool(smc::ConfigItem::DisableProgramVerification)); + ts->enable_kernel_debugging = GetConfigBool(smc::ConfigItem::DisableProgramVerification); + } + } + } + + void KSystemControl::InitializePhase1() { + /* Enable KTargetSystem. */ + KTargetSystem::SetInitialized(); + + /* Check KTargetSystem was configured correctly. */ + { + /* Check IsDebugMode. */ + { + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugMode() == GetConfigBool(smc::ConfigItem::IsDebugMode)); + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugLoggingEnabled() == GetConfigBool(smc::ConfigItem::IsDebugMode)); + } + + /* Check Kernel Configuration. */ + { + const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)}; + + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugMemoryFillEnabled() == kernel_config.Get()); + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsUserExceptionHandlersEnabled() == kernel_config.Get()); + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled() == !kernel_config.Get()); + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsUserPmuAccessEnabled() == kernel_config.Get()); + + MESOSPHERE_ABORT_UNLESS(g_call_smc_on_panic == kernel_config.Get()); + } + + /* Check Kernel Debugging. */ + { + MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsKernelDebuggingEnabled() == GetConfigBool(smc::ConfigItem::DisableProgramVerification)); } } diff --git a/libraries/libmesosphere/source/kern_k_system_control_base.cpp b/libraries/libmesosphere/source/kern_k_system_control_base.cpp index fe0b569964..025ec4f9cd 100644 --- a/libraries/libmesosphere/source/kern_k_system_control_base.cpp +++ b/libraries/libmesosphere/source/kern_k_system_control_base.cpp @@ -124,31 +124,14 @@ namespace ams::kern { } /* System Initialization. */ + void KSystemControlBase::ConfigureKTargetSystem() { + /* By default, use the default config set in the KTargetSystem header. */ + } + void KSystemControlBase::InitializePhase1() { - /* Configure KTargetSystem. */ + /* Enable KTargetSystem. */ { - /* Set IsDebugMode. */ - { - KTargetSystem::SetIsDebugMode(true); - - /* If debug mode, we want to initialize uart logging. */ - KTargetSystem::EnableDebugLogging(true); - } - - /* Set Kernel Configuration. */ - { - KTargetSystem::EnableDebugMemoryFill(false); - KTargetSystem::EnableUserExceptionHandlers(true); - KTargetSystem::EnableDynamicResourceLimits(true); - KTargetSystem::EnableUserPmuAccess(false); - } - - /* Set Kernel Debugging. */ - { - /* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */ - /* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */ - KTargetSystem::EnableKernelDebugging(true); - } + KTargetSystem::SetInitialized(); } /* Initialize random and resource limit. 
*/ diff --git a/mesosphere/kernel/source/kern_kernel_instantiations.cpp b/mesosphere/kernel/source/kern_kernel_instantiations.cpp index 6adff07549..d01ddd151e 100644 --- a/mesosphere/kernel/source/kern_kernel_instantiations.cpp +++ b/mesosphere/kernel/source/kern_kernel_instantiations.cpp @@ -111,4 +111,8 @@ namespace ams::kern { KThread &Kernel::GetMainThread(s32 core_id) { return g_main_threads.m_arr[core_id]; } KThread &Kernel::GetIdleThread(s32 core_id) { return g_idle_threads.m_arr[core_id]; } + __attribute__((constructor)) void ConfigureKTargetSystem() { + KSystemControl::ConfigureKTargetSystem(); + } + } From 52bc54205bfadb3e76a4cc52dc031839414c46d1 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 15:20:32 -0700 Subject: [PATCH 13/35] kern: simplify KProcess max memory calculation --- libraries/libmesosphere/source/kern_k_process.cpp | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp index 96d82f975b..24c6baef52 100644 --- a/libraries/libmesosphere/source/kern_k_process.cpp +++ b/libraries/libmesosphere/source/kern_k_process.cpp @@ -220,18 +220,8 @@ namespace ams::kern { m_running_thread_switch_counts[i] = 0; } - /* Set max memory based on address space type. */ - switch ((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask)) { - case ams::svc::CreateProcessFlag_AddressSpace32Bit: - case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated: - case ams::svc::CreateProcessFlag_AddressSpace64Bit: - m_max_process_memory = m_page_table.GetHeapRegionSize(); - break; - case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias: - m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize(); - break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); - } + /* Set max memory. */ + m_max_process_memory = m_page_table.GetHeapRegionSize(); /* Generate random entropy. */ KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy)); From dfff4508fa3fc8d4c3b441a3aeb8b32bcb598e04 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 16:50:20 -0700 Subject: [PATCH 14/35] kern/strat: update for new DebugFlags capability semantics --- config_templates/exosphere.ini | 2 +- .../arch/arm64/kern_k_process_page_table.hpp | 4 +- .../mesosphere/kern_k_capabilities.hpp | 11 ++- .../include/mesosphere/kern_k_debug_base.hpp | 5 ++ .../mesosphere/kern_k_page_table_base.hpp | 4 +- .../include/mesosphere/kern_k_process.hpp | 4 + .../source/kern_k_capabilities.cpp | 7 ++ .../source/kern_k_debug_base.cpp | 81 ++++--------------- .../source/kern_k_page_table_base.cpp | 38 ++++++--- .../source/svc/kern_svc_debug.cpp | 23 +++++- .../source/svc/kern_svc_process.cpp | 3 + .../source/svc/kern_svc_thread.cpp | 45 ++++++----- stratosphere/creport/creport.json | 1 + stratosphere/dmnt.gen2/dmnt.gen2.json | 1 + stratosphere/fatal/fatal.json | 1 + .../loader/source/ldr_capabilities.cpp | 19 ++++- stratosphere/pm/pm.json | 1 + 17 files changed, 141 insertions(+), 109 deletions(-) diff --git a/config_templates/exosphere.ini b/config_templates/exosphere.ini index e8dc823774..8df772ccf9 100644 --- a/config_templates/exosphere.ini +++ b/config_templates/exosphere.ini @@ -1,6 +1,6 @@ # Key: debugmode, default: 1. # Desc: Controls whether kernel is debug mode. -# Disabling this may break Atmosphere's debugger in a future release. +# Disabling this will break Atmosphere. # Key: debugmode_user, default: 0. 
# Desc: Controls whether userland is debug mode. diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp index 3ad3c96a1c..e18a6fbc82 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp @@ -154,8 +154,8 @@ namespace ams::kern::arch::arm64 { R_RETURN(m_page_table.InvalidateCurrentProcessDataCache(address, size)); } - Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) { - R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size)); + Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod) { + R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size, force_debug_prod)); } Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp index 7c2f5dbcdf..2c74b835cd 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp @@ -168,9 +168,10 @@ namespace ams::kern { struct DebugFlags { using IdBits = Field<0, CapabilityId + 1>; - DEFINE_FIELD(AllowDebug, IdBits, 1, bool); - DEFINE_FIELD(ForceDebug, AllowDebug, 1, bool); - DEFINE_FIELD(Reserved, ForceDebug, 13); + DEFINE_FIELD(AllowDebug, IdBits, 1, bool); + DEFINE_FIELD(ForceDebugProd, AllowDebug, 1, bool); + DEFINE_FIELD(ForceDebug, ForceDebugProd, 1, bool); + DEFINE_FIELD(Reserved, ForceDebug, 12); }; #undef DEFINE_FIELD @@ -255,6 +256,10 @@ namespace ams::kern { return m_debug_capabilities.Get(); } + constexpr bool CanForceDebugProd() const { + return m_debug_capabilities.Get(); + } + constexpr bool CanForceDebug() const { return m_debug_capabilities.Get(); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp index 6c8fe54f7c..6978b91522 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp @@ -32,6 +32,7 @@ namespace ams::kern { KLightLock m_lock; KProcess::State m_old_process_state; bool m_is_attached; + bool m_is_force_debug_prod; public: explicit KDebugBase() { /* ... 
*/ } protected: @@ -62,6 +63,10 @@ namespace ams::kern { return m_is_attached; } + ALWAYS_INLINE bool IsForceDebugProd() const { + return m_is_force_debug_prod; + } + ALWAYS_INLINE bool OpenProcess() { return m_process_holder.Open(); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 8da4311df4..7b2c722e1b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -328,6 +328,8 @@ namespace ams::kern { R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); } + bool CanReadWriteDebugMemory(KProcessAddress addr, size_t size, bool force_debug_prod); + Result LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr); Result UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg); @@ -421,7 +423,7 @@ namespace ams::kern { Result InvalidateProcessDataCache(KProcessAddress address, size_t size); Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size); - Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size); + Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod); Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state); Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index c4ab7f2a58..849f73dabb 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -206,6 +206,10 @@ namespace ams::kern { return m_capabilities.IsPermittedDebug(); } + constexpr bool CanForceDebugProd() const { + return m_capabilities.CanForceDebugProd(); + } + constexpr bool CanForceDebug() const { return m_capabilities.CanForceDebug(); } diff --git a/libraries/libmesosphere/source/kern_k_capabilities.cpp b/libraries/libmesosphere/source/kern_k_capabilities.cpp index feb245edac..ba03592623 100644 --- a/libraries/libmesosphere/source/kern_k_capabilities.cpp +++ b/libraries/libmesosphere/source/kern_k_capabilities.cpp @@ -262,7 +262,14 @@ namespace ams::kern { /* Validate. */ R_UNLESS(cap.Get() == 0, svc::ResultReservedUsed()); + u32 total = 0; + if (cap.Get()) { ++total; } + if (cap.Get()) { ++total; } + if (cap.Get()) { ++total; } + R_UNLESS(total <= 1, svc::ResultInvalidCombination()); + m_debug_capabilities.Set(cap.Get()); + m_debug_capabilities.Set(cap.Get()); m_debug_capabilities.Set(cap.Get()); R_SUCCEED(); } diff --git a/libraries/libmesosphere/source/kern_k_debug_base.cpp b/libraries/libmesosphere/source/kern_k_debug_base.cpp index 418d804f3e..635673cd24 100644 --- a/libraries/libmesosphere/source/kern_k_debug_base.cpp +++ b/libraries/libmesosphere/source/kern_k_debug_base.cpp @@ -27,7 +27,8 @@ namespace ams::kern { void KDebugBase::Initialize() { /* Clear the continue flags. 
*/ - m_continue_flags = 0; + m_continue_flags = 0; + m_is_force_debug_prod = GetCurrentProcess().CanForceDebugProd(); } bool KDebugBase::Is64Bit() const { @@ -120,8 +121,11 @@ namespace ams::kern { /* Read the memory. */ if (info.GetSvcState() != ams::svc::MemoryState_Io) { /* The memory is normal memory. */ - R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size)); + R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size, this->IsForceDebugProd())); } else { + /* Only allow IO memory to be read if not force debug prod. */ + R_UNLESS(!this->IsForceDebugProd(), svc::ResultInvalidCurrentMemory()); + /* The memory is IO memory. */ R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size, info.GetState())); } @@ -269,6 +273,9 @@ namespace ams::kern { switch (state) { case KProcess::State_Created: case KProcess::State_Running: + /* Created and running processes can only be debugged if the debugger is not ForceDebugProd. */ + R_UNLESS(!this->IsForceDebugProd(), svc::ResultInvalidState()); + break; case KProcess::State_Crashed: break; case KProcess::State_CreatedAttached: @@ -408,69 +415,6 @@ namespace ams::kern { /* Get the process pointer. */ KProcess * const target = this->GetProcessUnsafe(); - /* Detach from the process. */ - { - /* Lock both ourselves and the target process. */ - KScopedLightLock state_lk(target->GetStateLock()); - KScopedLightLock list_lk(target->GetListLock()); - KScopedLightLock this_lk(m_lock); - - /* Check that we're still attached. */ - if (this->IsAttached()) { - /* Lock the scheduler. */ - KScopedSchedulerLock sl; - - /* Get the process's state. */ - const KProcess::State state = target->GetState(); - - /* Check that the process is in a state where we can terminate it. */ - R_UNLESS(state != KProcess::State_Created, svc::ResultInvalidState()); - R_UNLESS(state != KProcess::State_CreatedAttached, svc::ResultInvalidState()); - - /* Decide on a new state for the process. */ - KProcess::State new_state; - if (state == KProcess::State_RunningAttached) { - /* If the process is running, transition it accordingly. */ - new_state = KProcess::State_Running; - } else if (state == KProcess::State_DebugBreak) { - /* If the process is debug breaked, transition it accordingly. */ - new_state = KProcess::State_Crashed; - - /* Suspend all the threads in the process. */ - { - auto end = target->GetThreadList().end(); - for (auto it = target->GetThreadList().begin(); it != end; ++it) { - /* Request that we suspend the thread. */ - it->RequestSuspend(KThread::SuspendType_Debug); - } - } - } else { - /* Otherwise, don't transition. */ - new_state = state; - } - - #if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP) - /* Clear single step on all threads. */ - { - auto end = target->GetThreadList().end(); - for (auto it = target->GetThreadList().begin(); it != end; ++it) { - it->ClearHardwareSingleStep(); - } - } - #endif - - /* Detach from the process. */ - target->ClearDebugObject(new_state); - m_is_attached = false; - - /* Close the initial reference opened to our process. */ - this->CloseProcess(); - - /* Clear our continue flags. */ - m_continue_flags = 0; - } - } - /* Terminate the process. 
*/ target->Terminate(); @@ -962,7 +906,12 @@ namespace ams::kern { case ams::svc::DebugException_UndefinedInstruction: { MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1); - out->info.exception.specific.undefined_instruction.insn = info->info.exception.exception_data[0]; + /* Only save the instruction if the caller is not force debug prod. */ + if (this->IsForceDebugProd()) { + out->info.exception.specific.undefined_instruction.insn = 0; + } else { + out->info.exception.specific.undefined_instruction.insn = info->info.exception.exception_data[0]; + } } break; case ams::svc::DebugException_BreakPoint: diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index 95bc983cac..2a9402ecc5 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -2695,18 +2695,37 @@ namespace ams::kern { R_RETURN(cpu::InvalidateDataCache(GetVoidPointer(address), size)); } - Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) { + bool KPageTableBase::CanReadWriteDebugMemory(KProcessAddress address, size_t size, bool force_debug_prod) { + /* Check pre-conditions. */ + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + /* If the memory is debuggable and user-readable, we can perform the access. */ + if (R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_NotMapped | KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) { + return true; + } + + /* If we're in debug mode, and the process isn't force debug prod, check if the memory is debuggable and kernel-readable and user-executable. */ + if (KTargetSystem::IsDebugMode() && !force_debug_prod) { + if (R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_KernelRead | KMemoryPermission_UserExecute, KMemoryPermission_KernelRead | KMemoryPermission_UserExecute, KMemoryAttribute_None, KMemoryAttribute_None))) { + return true; + } + } + + /* If neither of the above checks passed, we can't access the memory. */ + return false; + } + + Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod) { /* Lightly validate the region is in range. */ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ KScopedLightLock lk(m_general_lock); - /* Require that the memory either be user readable or debuggable. */ - const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); + /* Require that the memory either be user-readable-and-mapped or debug-accessible. 
*/ + const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_NotMapped | KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); if (!can_read) { - const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); - R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory()); + R_UNLESS(this->CanReadWriteDebugMemory(address, size, force_debug_prod), svc::ResultInvalidCurrentMemory()); } /* Get the impl. */ @@ -2788,11 +2807,10 @@ namespace ams::kern { /* Lock the table. */ KScopedLightLock lk(m_general_lock); - /* Require that the memory either be user writable or debuggable. */ - const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None)); - if (!can_read) { - const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); - R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory()); + /* Require that the memory either be user-writable-and-mapped or debug-accessible. */ + const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_NotMapped | KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None)); + if (!can_write) { + R_UNLESS(this->CanReadWriteDebugMemory(address, size, false), svc::ResultInvalidCurrentMemory()); } /* Get the impl. */ diff --git a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp index 3698853553..986654ce4e 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp @@ -24,6 +24,9 @@ namespace ams::kern::svc { constexpr inline int32_t MaximumDebuggableThreadCount = 0x60; Result DebugActiveProcess(ams::svc::Handle *out_handle, uint64_t process_id) { + /* Check that the SVC can be used. */ + R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented()); + /* Get the process from its id. */ KProcess *process = KProcess::GetProcessFromId(process_id); R_UNLESS(process != nullptr, svc::ResultInvalidProcessId()); @@ -32,9 +35,8 @@ ON_SCOPE_EXIT { process->Close(); }; /* Check that the debugging is allowed. */ - if (!process->IsPermittedDebug()) { - R_UNLESS(GetCurrentProcess().CanForceDebug(), svc::ResultInvalidState()); - } + const bool allowable = process->IsPermittedDebug() || GetCurrentProcess().CanForceDebug() || GetCurrentProcess().CanForceDebugProd(); + R_UNLESS(allowable, svc::ResultInvalidState()); /* Disallow debugging one's own process, to prevent softlocks. */ R_UNLESS(process != GetCurrentProcessPointer(), svc::ResultInvalidState()); @@ -92,6 +94,9 @@ namespace ams::kern::svc { template<typename EventInfoType> Result GetDebugEvent(KUserPointer<EventInfoType *> out_info, ams::svc::Handle debug_handle) { + /* Only allow invoking the svc on development hardware or if force debug prod. 
*/ + R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented()); + /* Get the debug object. */ KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); @@ -164,6 +169,9 @@ namespace ams::kern::svc { } Result GetDebugThreadContext(KUserPointer out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) { + /* Only allow invoking the svc on development hardware or if force debug prod. */ + R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented()); + /* Validate the context flags. */ R_UNLESS((context_flags | ams::svc::ThreadContextFlag_All) == ams::svc::ThreadContextFlag_All, svc::ResultInvalidEnumValue()); @@ -220,6 +228,9 @@ namespace ams::kern::svc { } Result QueryDebugProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uintptr_t address) { + /* Only allow invoking the svc on development hardware or if force debug prod. */ + R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented()); + /* Get the debug object. */ KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); @@ -261,6 +272,9 @@ namespace ams::kern::svc { } Result ReadDebugProcessMemory(uintptr_t buffer, ams::svc::Handle debug_handle, uintptr_t address, size_t size) { + /* Only allow invoking the svc on development hardware or if force debug prod. */ + R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented()); + /* Validate address / size. */ R_UNLESS(size > 0, svc::ResultInvalidSize()); R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); @@ -306,6 +320,9 @@ namespace ams::kern::svc { } Result GetDebugThreadParam(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) { + /* Only allow invoking the svc on development hardware or if force debug prod. */ + R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented()); + /* Get the debug object. */ KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); diff --git a/libraries/libmesosphere/source/svc/kern_svc_process.cpp b/libraries/libmesosphere/source/svc/kern_svc_process.cpp index 5141a2c312..98c8740f2f 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_process.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_process.cpp @@ -71,6 +71,9 @@ namespace ams::kern::svc { } Result GetProcessList(int32_t *out_num_processes, KUserPointer out_process_ids, int32_t max_out_count) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + /* Validate that the out count is valid. 
*/ R_UNLESS((0 <= max_out_count && max_out_count <= static_cast(std::numeric_limits::max() / sizeof(u64))), svc::ResultOutOfRange()); diff --git a/libraries/libmesosphere/source/svc/kern_svc_thread.cpp b/libraries/libmesosphere/source/svc/kern_svc_thread.cpp index 99c8c37a86..6f74dd145f 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_thread.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_thread.cpp @@ -217,6 +217,9 @@ namespace ams::kern::svc { } Result GetThreadList(int32_t *out_num_threads, KUserPointer out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + /* Validate that the out count is valid. */ R_UNLESS((0 <= max_out_count && max_out_count <= static_cast(std::numeric_limits::max() / sizeof(u64))), svc::ResultOutOfRange()); @@ -225,30 +228,34 @@ namespace ams::kern::svc { R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(out_thread_ids.GetUnsafePointer()), max_out_count * sizeof(u64)), svc::ResultInvalidCurrentMemory()); } - if (debug_handle == ams::svc::InvalidHandle) { - /* If passed invalid handle, we should return the global thread list. */ - R_TRY(KThread::GetThreadList(out_num_threads, out_thread_ids, max_out_count)); - } else { - /* Get the handle table. */ - auto &handle_table = GetCurrentProcess().GetHandleTable(); + /* Get the handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); - /* Try to get as a debug object. */ - KScopedAutoObject debug = handle_table.GetObject(debug_handle); - if (debug.IsNotNull()) { - /* Check that the debug object has a process. */ - R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated()); - R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated()); - ON_SCOPE_EXIT { debug->CloseProcess(); }; + /* Try to get as a debug object. */ + KScopedAutoObject debug = handle_table.GetObject(debug_handle); + if (debug.IsNotNull()) { + /* Check that the debug object has a process. */ + R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated()); + R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated()); + ON_SCOPE_EXIT { debug->CloseProcess(); }; - /* Get the thread list. */ - R_TRY(debug->GetProcessUnsafe()->GetThreadList(out_num_threads, out_thread_ids, max_out_count)); - } else { - /* Try to get as a process. */ - KScopedAutoObject process = handle_table.GetObjectWithoutPseudoHandle(debug_handle); - R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + /* Get the thread list. */ + R_TRY(debug->GetProcessUnsafe()->GetThreadList(out_num_threads, out_thread_ids, max_out_count)); + } else { + /* Only allow getting as a process (or global) if the caller does not have ForceDebugProd. */ + R_UNLESS(!GetCurrentProcess().CanForceDebugProd(), svc::ResultInvalidHandle()); + /* Try to get as a process. */ + KScopedAutoObject process = handle_table.GetObjectWithoutPseudoHandle(debug_handle); + if (process.IsNotNull()) { /* Get the thread list. */ R_TRY(process->GetThreadList(out_num_threads, out_thread_ids, max_out_count)); + } else { + /* If the object is not a process, the caller may want the global thread list. */ + R_UNLESS(debug_handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* If passed invalid handle, we should return the global thread list. 
*/ + R_TRY(KThread::GetThreadList(out_num_threads, out_thread_ids, max_out_count)); } } diff --git a/stratosphere/creport/creport.json b/stratosphere/creport/creport.json index cdd28371d2..99859c3d2b 100644 --- a/stratosphere/creport/creport.json +++ b/stratosphere/creport/creport.json @@ -110,6 +110,7 @@ "type": "debug_flags", "value": { "allow_debug": false, + "force_debug_prod": false, "force_debug": true } } diff --git a/stratosphere/dmnt.gen2/dmnt.gen2.json b/stratosphere/dmnt.gen2/dmnt.gen2.json index 918f1262c5..e0d9707890 100644 --- a/stratosphere/dmnt.gen2/dmnt.gen2.json +++ b/stratosphere/dmnt.gen2/dmnt.gen2.json @@ -105,6 +105,7 @@ "type": "debug_flags", "value": { "allow_debug": false, + "force_debug_prod": false, "force_debug": true } }] diff --git a/stratosphere/fatal/fatal.json b/stratosphere/fatal/fatal.json index 45b7180892..aef307a681 100644 --- a/stratosphere/fatal/fatal.json +++ b/stratosphere/fatal/fatal.json @@ -104,6 +104,7 @@ "type": "debug_flags", "value": { "allow_debug": false, + "force_debug_prod": false, "force_debug": true } }] diff --git a/stratosphere/loader/source/ldr_capabilities.cpp b/stratosphere/loader/source/ldr_capabilities.cpp index 6835267b38..cd93d855f5 100644 --- a/stratosphere/loader/source/ldr_capabilities.cpp +++ b/stratosphere/loader/source/ldr_capabilities.cpp @@ -308,10 +308,19 @@ namespace ams::ldr { ); DEFINE_CAPABILITY_CLASS(DebugFlags, - DEFINE_CAPABILITY_FIELD(AllowDebug, IdBits, 1, bool); - DEFINE_CAPABILITY_FIELD(ForceDebug, AllowDebug, 1, bool); + DEFINE_CAPABILITY_FIELD(AllowDebug, IdBits, 1, bool); + DEFINE_CAPABILITY_FIELD(ForceDebugProd, AllowDebug, 1, bool); + DEFINE_CAPABILITY_FIELD(ForceDebug, ForceDebugProd, 1, bool); bool IsValid(const util::BitPack32 *kac, size_t kac_count) const { + u32 total = 0; + if (this->GetAllowDebug()) { ++total; } + if (this->GetForceDebugProd()) { ++total; } + if (this->GetForceDebug()) { ++total; } + if (total > 1) { + return false; + } + for (size_t i = 0; i < kac_count; i++) { if (GetCapabilityId(kac[i]) == Id) { const auto restriction = Decode(kac[i]); @@ -319,12 +328,14 @@ namespace ams::ldr { return (restriction.GetValue() & this->GetValue()) == this->GetValue(); } } + return false; } - static constexpr util::BitPack32 Encode(bool allow_debug, bool force_debug) { + static constexpr util::BitPack32 Encode(bool allow_debug, bool force_debug_prod, bool force_debug) { util::BitPack32 encoded{IdBitsValue}; encoded.Set(allow_debug); + encoded.Set(force_debug_prod); encoded.Set(force_debug); return encoded; } @@ -406,7 +417,7 @@ namespace ams::ldr { kac[i] = CapabilityApplicationType::Encode(flags & ProgramInfoFlag_ApplicationTypeMask); break; case CapabilityId::DebugFlags: - kac[i] = CapabilityDebugFlags::Encode((flags & ProgramInfoFlag_AllowDebug) != 0, CapabilityDebugFlags::Decode(cap).GetForceDebug()); + kac[i] = CapabilityDebugFlags::Encode((flags & ProgramInfoFlag_AllowDebug) != 0, CapabilityDebugFlags::Decode(cap).GetForceDebugProd(), CapabilityDebugFlags::Decode(cap).GetForceDebug()); break; default: break; diff --git a/stratosphere/pm/pm.json b/stratosphere/pm/pm.json index 005504b727..4ffe5e94e2 100644 --- a/stratosphere/pm/pm.json +++ b/stratosphere/pm/pm.json @@ -85,6 +85,7 @@ "type": "debug_flags", "value": { "allow_debug": false, + "force_debug_prod": false, "force_debug": true } } From 12f7c95c5d135feeac35f2304635604d05b4ac9b Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 17:42:02 -0700 Subject: [PATCH 15/35] kern/ldr: add support for --x executables --- 
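[Editor's note, not part of the patch: this commit threads a prevent_code_reads
flag from the NPDM's MetaFlag_PreventCodeReads down through NSO loading, so a
module's .text may be mapped execute-only (--x) rather than r-x. The loader-side
selection boils down to the following sketch; the names are taken from this
series, but the helper itself is hypothetical:

    os::MemoryPermission SelectTextPermission(bool prevent_code_reads) {
        /* Execute-only when the meta requests it; classic read-execute otherwise. */
        return prevent_code_reads ? os::MemoryPermission_ExecuteOnly : os::MemoryPermission_ReadExecute;
    }

Kernel-side, svc::MemoryPermission_Execute becomes an accepted permission for
process memory, as the hunks below show.]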
.../arch/arm64/kern_k_page_table.hpp | 21 +++++++++----- .../arm64/kern_userspace_memory_access.hpp | 11 +++++++ .../arch/arm64/kern_exception_handlers.cpp | 29 ++----------------- .../arm64/kern_userspace_memory_access_asm.s | 15 ++++++++++ .../source/kern_k_page_table_base.cpp | 5 ++++ .../source/svc/kern_svc_process_memory.cpp | 1 + .../include/stratosphere/ldr/ldr_types.hpp | 1 + .../os_process_memory_impl.os.horizon.cpp | 1 + .../loader/source/ldr_process_creation.cpp | 10 +++---- 9 files changed, 54 insertions(+), 40 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index 370211b2bd..2ccd4b72ae 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -109,6 +109,9 @@ namespace ams::kern::arch::arm64 { KPageTableManager &GetPageTableManager() const { return *m_manager; } private: constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const { + /* Check that the property is not kernel execute. */ + MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_KernelExecute) == 0); + /* Set basic attributes. */ PageTableEntry entry{PageTableEntry::ExtensionFlag_Valid}; entry.SetPrivilegedExecuteNever(true); @@ -122,22 +125,24 @@ namespace ams::kern::arch::arm64 { /* Set page attribute. */ if (properties.io) { MESOSPHERE_ABORT_UNLESS(!properties.uncached); - MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); + MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0); entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE) .SetUserExecuteNever(true); } else if (properties.uncached) { - MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); + MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0); - entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable); + entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable) + .SetUserExecuteNever(true); } else { entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory); - } - /* Set user execute never bit. */ - if (properties.perm != KMemoryPermission_UserReadExecute) { - MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); - entry.SetUserExecuteNever(true); + if ((properties.perm & KMemoryPermission_UserExecute) != 0) { + /* Check that the permission is either r--/--x or r--/r-x. */ + MESOSPHERE_ABORT_UNLESS((properties.perm & ~ams::svc::MemoryPermission_Read) == (KMemoryPermission_KernelRead | KMemoryPermission_UserExecute)); + } else { + entry.SetUserExecuteNever(true); + } } /* Set AP[1] based on perm. 
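   (Editor's aside, illustrative and not part of the patch: the user-execute
   branch above admits exactly two encodings, r--/--x and r--/r-x; after
   stripping the optional user-read bit, what remains must be kernel-read plus
   user-execute. Restated as a hypothetical standalone predicate, using the
   flag names from this header:

       constexpr bool IsValidUserExecutePerm(u32 perm) {
           return (perm & ~ams::svc::MemoryPermission_Read) == (KMemoryPermission_KernelRead | KMemoryPermission_UserExecute);
       }

   End of aside.)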
*/ diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp index 9e0c1844eb..6afabe51c7 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp @@ -21,7 +21,18 @@ namespace ams::kern::arch::arm64 { void UserspaceAccessFunctionAreaBegin(); class UserspaceAccess { + private: + static bool CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src); public: + static bool CopyMemoryFromUserSize32BitWithSupervisorAccess(void *dst, const void *src) { + /* Check that the address is within the valid userspace range. */ + if (const uintptr_t src_uptr = reinterpret_cast(src); src_uptr < ams::svc::AddressNullGuard32Size || (src_uptr + sizeof(u32) - 1) >= ams::svc::AddressMemoryRegion39Size) { + return false; + } + + return CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(dst, src); + } + static bool CopyMemoryFromUser(void *dst, const void *src, size_t size); static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size); static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp index d55e217e3a..0fc25c741e 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp @@ -100,6 +100,8 @@ namespace ams::kern::arch::arm64 { u32 insn_value = 0; if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast(context->pc), sizeof(insn_value))) { insn = insn_value; + } else if (KTargetSystem::IsDebugMode() && (context->pc & 3) == 0 && UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccess(std::addressof(insn_value), reinterpret_cast(context->pc))) { + insn = insn_value; } else { insn = 0; } @@ -112,33 +114,6 @@ namespace ams::kern::arch::arm64 { bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled(); const u64 ec = (esr >> 26) & 0x3F; - switch (ec) { - case EsrEc_Unknown: - case EsrEc_IllegalExecution: - case EsrEc_Svc32: - case EsrEc_Svc64: - case EsrEc_PcAlignmentFault: - case EsrEc_SpAlignmentFault: - case EsrEc_SErrorInterrupt: - case EsrEc_BreakPointEl0: - case EsrEc_SoftwareStepEl0: - case EsrEc_WatchPointEl0: - case EsrEc_BkptInstruction: - case EsrEc_BrkInstruction: - break; - default: - { - /* If the fault address's state is KMemoryState_Code and the user can't read the address, force processing exception. */ - KMemoryInfo info; - ams::svc::PageInfo pi; - if (R_SUCCEEDED(cur_process.GetPageTable().QueryInfo(std::addressof(info), std::addressof(pi), far))) { - if (info.GetState() == KMemoryState_Code && ((info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead)) { - should_process_user_exception = true; - } - } - } - break; - } /* In the event that we return from this exception, we want SPSR.SS set so that we advance an instruction if single-stepping. 
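   (Editor's aside, illustrative and not part of the patch: the fallback added
   above lets the kernel fetch a faulting instruction from an execute-only user
   page, which the normal unprivileged copy cannot read; it is gated on debug
   mode and on a 4-byte-aligned PC. The wrapper's range check, restated as a
   hypothetical standalone helper using the constants from the hunk above:

       bool IsReadableUserRange32(uintptr_t src) {
           return src >= ams::svc::AddressNullGuard32Size
               && (src + sizeof(u32) - 1) < ams::svc::AddressMemoryRegion39Size;
       }

   End of aside.)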
*/ #if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP) diff --git a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s index e697fde3c9..01104deb9a 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s +++ b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s @@ -154,6 +154,21 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv: mov x0, #1 ret +/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv +.type _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv: + /* Just load and store a u32. */ + /* NOTE: This is done with supervisor access permissions. */ + ldr w2, [x1] + str w2, [x0] + + /* We're done. */ + mov x0, #1 + ret + /* ams::kern::arch::arm64::UserspaceAccess::CopyStringFromUser(void *dst, const void *src, size_t size) */ .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, "ax", %progbits .global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index 2a9402ecc5..d141490ef5 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -1787,6 +1787,11 @@ namespace ams::kern { /* We're going to perform an update, so create a helper. */ KScopedPageTableUpdater updater(this); + /* If we're creating an executable mapping, take and immediately release the scheduler lock. This will force a reschedule. */ + if (is_x) { + KScopedSchedulerLock sl; + } + /* Perform mapping operation. */ const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None }; const auto operation = was_x ? 
OperationType_ChangePermissionsAndRefreshAndFlush : OperationType_ChangePermissions; diff --git a/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp index 91d799d784..41420ca066 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp @@ -27,6 +27,7 @@ namespace ams::kern::svc { case ams::svc::MemoryPermission_Read: case ams::svc::MemoryPermission_ReadWrite: case ams::svc::MemoryPermission_ReadExecute: + case ams::svc::MemoryPermission_Execute: return true; default: return false; diff --git a/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp b/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp index 325d2a35d4..e81be6b7d6 100644 --- a/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp @@ -226,6 +226,7 @@ namespace ams::ldr { MetaFlag_OptimizeMemoryAllocation = (1 << 4), MetaFlag_DisableDeviceAddressSpaceMerge = (1 << 5), MetaFlag_EnableAliasRegionExtraSize = (1 << 6), + MetaFlag_PreventCodeReads = (1 << 7), }; enum AddressSpaceType { diff --git a/libraries/libstratosphere/source/os/impl/os_process_memory_impl.os.horizon.cpp b/libraries/libstratosphere/source/os/impl/os_process_memory_impl.os.horizon.cpp index de7b241a6d..686cc69951 100644 --- a/libraries/libstratosphere/source/os/impl/os_process_memory_impl.os.horizon.cpp +++ b/libraries/libstratosphere/source/os/impl/os_process_memory_impl.os.horizon.cpp @@ -27,6 +27,7 @@ namespace ams::os::impl { case os::MemoryPermission_ReadOnly: return svc::MemoryPermission_Read; case os::MemoryPermission_ReadWrite: return svc::MemoryPermission_ReadWrite; case os::MemoryPermission_ReadExecute: return svc::MemoryPermission_ReadExecute; + case os::MemoryPermission_ExecuteOnly: return svc::MemoryPermission_Execute; AMS_UNREACHABLE_DEFAULT_CASE(); } } diff --git a/stratosphere/loader/source/ldr_process_creation.cpp b/stratosphere/loader/source/ldr_process_creation.cpp index a7d6578970..c1daeeb9f8 100644 --- a/stratosphere/loader/source/ldr_process_creation.cpp +++ b/stratosphere/loader/source/ldr_process_creation.cpp @@ -555,7 +555,7 @@ namespace ams::ldr { R_SUCCEED(); } - Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size) { + Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size, bool prevent_code_reads) { /* Map and read data from file. */ { /* Map the process memory. */ @@ -594,7 +594,7 @@ namespace ams::ldr { const size_t ro_size = util::AlignUp(nso_header->ro_size, os::MemoryPageSize); const size_t rw_size = util::AlignUp(nso_header->rw_size + nso_header->bss_size, os::MemoryPageSize); if (text_size) { - R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, os::MemoryPermission_ReadExecute)); + R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, prevent_code_reads ? 
os::MemoryPermission_ExecuteOnly : os::MemoryPermission_ReadExecute)); } if (ro_size) { R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->ro_dst_offset, ro_size, os::MemoryPermission_ReadOnly)); @@ -606,7 +606,7 @@ namespace ams::ldr { R_SUCCEED(); } - Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument) { + Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument, bool prevent_code_reads) { /* Load each NSO. */ for (size_t i = 0; i < Nso_Count; i++) { if (has_nso[i]) { @@ -614,7 +614,7 @@ namespace ams::ldr { R_TRY(fs::OpenFile(std::addressof(file), GetNsoPath(i), fs::OpenMode_Read)); ON_SCOPE_EXIT { fs::CloseFile(file); }; - R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i])); + R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i], prevent_code_reads)); } } @@ -658,7 +658,7 @@ namespace ams::ldr { ON_RESULT_FAILURE { svc::CloseHandle(process_handle); }; /* Load all auto load modules. */ - R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument)); + R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument, (meta->npdm->flags & ldr::Npdm::MetaFlag_PreventCodeReads) != 0)); } } From d78e450db6872f17dfedd10e6a475d8d8a204cae Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 21:36:49 -0700 Subject: [PATCH 16/35] kern: add minimum alignment support to KMemoryManager --- .../nintendo/nx/kern_k_memory_layout.hpp | 4 +++ .../mesosphere/kern_k_memory_manager.hpp | 9 +++-- .../source/kern_k_memory_manager.cpp | 33 +++++++++++++++++-- .../libmesosphere/source/kern_k_process.cpp | 1 - libraries/libmesosphere/source/kern_main.cpp | 3 +- .../source/svc/kern_svc_physical_memory.cpp | 18 +++++----- .../source/svc/kern_svc_process.cpp | 6 ++-- 7 files changed, 58 insertions(+), 16 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp index b12e8d02e0..4e82780b06 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp @@ -24,4 +24,8 @@ namespace ams::kern { constexpr inline size_t MainMemorySize = 4_GB; constexpr inline size_t MainMemorySizeMax = 8_GB; + constexpr inline u32 MinimumMemoryManagerAlignmentShifts[] = { + 0, 0, 0, 0 + }; + } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp index bd596dfd0f..99ef1f6b5f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -164,6 +164,7 @@ namespace ams::kern { size_t m_num_managers; u64 m_optimized_process_ids[Pool_Count]; bool m_has_optimized_process[Pool_Count]; + s32 m_min_heap_indexes[Pool_Count]; private: Impl &GetManager(KPhysicalAddress address) { return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()]; @@ -188,12 +189,12 @@ namespace ams::kern { Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool 
unoptimized, bool random, s32 min_heap_index); public: KMemoryManager() - : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process() + : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process(), m_min_heap_indexes() { /* ... */ } - NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size); + NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts); NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool); NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool); @@ -299,6 +300,10 @@ namespace ams::kern { manager->DumpFreeList(); } } + + size_t GetMinimumAlignment(Pool pool) { + return KPageHeap::GetBlockSize(m_min_heap_indexes[pool]); + } public: static size_t CalculateManagementOverheadSize(size_t region_size) { return Impl::CalculateManagementOverheadSize(region_size); diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp index fa533387c4..aa5f5bdcc8 100644 --- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -35,7 +35,7 @@ namespace ams::kern { } - void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) { + void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts) { /* Clear the management region to zero. */ const KVirtualAddress management_region_end = management_region + management_region_size; std::memset(GetVoidPointer(management_region), 0, management_region_size); @@ -154,6 +154,17 @@ namespace ams::kern { for (size_t i = 0; i < m_num_managers; ++i) { m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); } + + /* Determine the min heap size for all pools. */ + for (size_t i = 0; i < Pool_Count; ++i) { + /* Determine the min alignment for the pool in pages. */ + const size_t min_align_pages = 1 << min_align_shifts[i]; + + /* Determine a heap index. */ + if (const auto heap_index = KPageHeap::GetAlignedBlockIndex(min_align_pages, min_align_pages); heap_index >= 0) { + m_min_heap_indexes[i] = heap_index; + } + } } Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { @@ -192,8 +203,19 @@ namespace ams::kern { return Null; } - /* Lock the pool that we're allocating from. */ + /* Determine the pool and direction we're allocating from. */ const auto [pool, dir] = DecodeOption(option); + + /* Check that we're allocating a correctly aligned number of pages. */ + const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]); + if (!util::IsAligned(num_pages, min_align_pages)) { + return Null; + } + + /* Update our alignment. */ + align_pages = std::max(align_pages, min_align_pages); + + /* Lock the pool that we're allocating from. */ KScopedLightLock lk(m_pool_locks[pool]); /* Choose a heap based on our page size request. */ @@ -226,6 +248,13 @@ namespace ams::kern { } Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) { + /* Check that we're allocating a correctly aligned number of pages. 
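   (Editor's aside, illustrative and not part of the patch: each pool's minimum
   alignment arrives as a shift, is converted to a page count, and is mapped
   once at initialization to a page-heap block index; the allocation paths then
   reject misaligned requests and raise the requested alignment to the pool
   minimum. A hypothetical standalone restatement of that rule:

       bool CheckAndAdjustAlignment(size_t num_pages, size_t &align_pages, size_t min_align_pages) {
           /* Reject misaligned requests, then raise the alignment to the pool minimum. */
           if (!util::IsAligned(num_pages, min_align_pages)) { return false; }
           align_pages = std::max(align_pages, min_align_pages);
           return true;
       }

   End of aside.)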
*/ + const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]); + R_UNLESS(util::IsAligned(num_pages, min_align_pages), svc::ResultInvalidSize()); + + /* Adjust our min heap index to the pool minimum if needed. */ + min_heap_index = std::max(min_heap_index, m_min_heap_indexes[pool]); + /* Choose a heap based on our page size request. */ const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory()); diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp index 24c6baef52..19a9fd85ee 100644 --- a/libraries/libmesosphere/source/kern_k_process.cpp +++ b/libraries/libmesosphere/source/kern_k_process.cpp @@ -924,7 +924,6 @@ namespace ams::kern { MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0); /* Ensure that we're allocating a valid stack. */ - stack_size = util::AlignUp(stack_size, PageSize); R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory()); R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory()); diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp index 0f1782c2ac..160bb495d1 100644 --- a/libraries/libmesosphere/source/kern_main.cpp +++ b/libraries/libmesosphere/source/kern_main.cpp @@ -56,8 +56,9 @@ namespace ams::kern { { const auto &management_region = KMemoryLayout::GetPoolManagementRegion(); MESOSPHERE_ABORT_UNLESS(management_region.GetEndAddress() != 0); + static_assert(util::size(MinimumMemoryManagerAlignmentShifts) == KMemoryManager::Pool_Count); - Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize()); + Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize(), MinimumMemoryManagerAlignmentShifts); } /* Copy the Initial Process Binary to safe memory. */ diff --git a/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp index fc5414905b..e2237877d5 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp @@ -48,10 +48,11 @@ namespace ams::kern::svc { Result MapPhysicalMemory(uintptr_t address, size_t size) { /* Validate address / size. */ - R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); - R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); - R_UNLESS(size > 0, svc::ResultInvalidSize()); - R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion()); + const size_t min_alignment = Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool()); + R_UNLESS(util::IsAligned(address, min_alignment), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, min_alignment), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion()); /* Verify that the process has system resource. */ auto &process = GetCurrentProcess(); @@ -69,10 +70,11 @@ namespace ams::kern::svc { Result UnmapPhysicalMemory(uintptr_t address, size_t size) { /* Validate address / size. 
*/ - R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); - R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); - R_UNLESS(size > 0, svc::ResultInvalidSize()); - R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion()); + const size_t min_alignment = Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool()); + R_UNLESS(util::IsAligned(address, min_alignment), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, min_alignment), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion()); /* Verify that the process has system resource. */ auto &process = GetCurrentProcess(); diff --git a/libraries/libmesosphere/source/svc/kern_svc_process.cpp b/libraries/libmesosphere/source/svc/kern_svc_process.cpp index 98c8740f2f..27ff14b4fc 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_process.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_process.cpp @@ -296,7 +296,9 @@ namespace ams::kern::svc { Result StartProcess(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) { /* Validate stack size. */ - R_UNLESS(main_thread_stack_size == static_cast(main_thread_stack_size), svc::ResultOutOfMemory()); + const uint64_t aligned_stack_size = util::AlignUp(main_thread_stack_size, Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool())); + R_UNLESS(aligned_stack_size >= main_thread_stack_size, svc::ResultOutOfMemory()); + R_UNLESS(aligned_stack_size == static_cast(aligned_stack_size), svc::ResultOutOfMemory()); /* Get the target process. */ KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); @@ -314,7 +316,7 @@ namespace ams::kern::svc { process->SetIdealCoreId(core_id); /* Run the process. 
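   (Editor's aside, illustrative and not part of the patch: the check
   aligned_stack_size >= main_thread_stack_size above doubles as an overflow
   guard, because aligning up near the top of the 64-bit range wraps past
   zero. A hypothetical sketch mirroring util::AlignUp demonstrates the wrap:

       constexpr uint64_t AlignUp64(uint64_t v, uint64_t a) { return (v + (a - 1)) & ~(a - 1); }
       static_assert(AlignUp64(UINT64_MAX - 1, 0x1000) == 0); /* wrapped; the >= check rejects this */

   End of aside.)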
*/ - R_RETURN(process->Run(priority, static_cast(main_thread_stack_size))); + R_RETURN(process->Run(priority, static_cast(aligned_stack_size))); } Result TerminateProcess(ams::svc::Handle process_handle) { From 5ff0a236e26131f057056c7ed5d6f2d49420106b Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 21:46:15 -0700 Subject: [PATCH 17/35] kern: specify allowable ipc client memory attr via inverted-whitelist, not blacklist --- libraries/libmesosphere/source/kern_k_page_table_base.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index d141490ef5..4ebaad0bd5 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -3834,15 +3834,15 @@ namespace ams::kern { switch (dst_state) { case KMemoryState_Ipc: test_state = KMemoryState_FlagCanUseIpc; - test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked; + test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_IpcLocked)); break; case KMemoryState_NonSecureIpc: test_state = KMemoryState_FlagCanUseNonSecureIpc; - test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked)); break; case KMemoryState_NonDeviceIpc: test_state = KMemoryState_FlagCanUseNonDeviceIpc; - test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked)); break; default: R_THROW(svc::ResultInvalidCombination()); } From d0c557e30f5e5f6bb49690c47a0aa2506da0dbcd Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 21:49:38 -0700 Subject: [PATCH 18/35] kern: add note that N ifdef'd out calling HandleException() for EL1 faults --- .../kernel/source/arch/arm64/kern_exception_handlers_asm.s | 1 + 1 file changed, 1 insertion(+) diff --git a/mesosphere/kernel/source/arch/arm64/kern_exception_handlers_asm.s b/mesosphere/kernel/source/arch/arm64/kern_exception_handlers_asm.s index d81eb7e897..a3a877b7e9 100644 --- a/mesosphere/kernel/source/arch/arm64/kern_exception_handlers_asm.s +++ b/mesosphere/kernel/source/arch/arm64/kern_exception_handlers_asm.s @@ -444,6 +444,7 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv: ERET_WITH_SPECULATION_BARRIER 2: /* The exception wasn't triggered by copying memory from userspace. */ + /* NOTE: The following is, as of 19.0.0, now ifdef'd out on NX non-debug kernel. 
*/ ldr x0, [sp, #8] ldr x1, [sp, #16] From 4baf0e8cce68a2cb96dca455839c59281f0d345c Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 9 Oct 2024 22:01:45 -0700 Subject: [PATCH 19/35] kern: invoke supervisor mode thread functions from C++ context with valid stack frame --- .../source/arch/arm64/kern_k_thread_context.cpp | 9 +++++++++ .../kernel/source/arch/arm64/kern_k_thread_context_asm.s | 7 ++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp index b3cef67550..76e91cff76 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp @@ -21,6 +21,15 @@ namespace ams::kern::arch::arm64 { void UserModeThreadStarter(); void SupervisorModeThreadStarter(); + void InvokeSupervisorModeThread(uintptr_t argument, uintptr_t entrypoint) { + /* Invoke the function. */ + using SupervisorModeFunctionType = void (*)(uintptr_t); + reinterpret_cast(entrypoint)(argument); + + /* Wait forever. */ + AMS_INFINITE_LOOP(); + } + void OnThreadStart() { MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); /* Send KDebug event for this thread's creation. */ diff --git a/mesosphere/kernel/source/arch/arm64/kern_k_thread_context_asm.s b/mesosphere/kernel/source/arch/arm64/kern_k_thread_context_asm.s index 362adc7121..4b4b1bd3a5 100644 --- a/mesosphere/kernel/source/arch/arm64/kern_k_thread_context_asm.s +++ b/mesosphere/kernel/source/arch/arm64/kern_k_thread_context_asm.s @@ -76,6 +76,9 @@ _ZN3ams4kern4arch5arm6427SupervisorModeThreadStarterEv: /* v */ /* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x30) | */ + /* Clear the link register. */ + mov x30, #0 + /* Load the argument and entrypoint. */ ldp x0, x1, [sp], #0x10 @@ -84,4 +87,6 @@ _ZN3ams4kern4arch5arm6427SupervisorModeThreadStarterEv: /* Mask I bit in DAIF */ msr daifclr, #2 - br x1 + + /* Invoke the function (by calling ams::kern::arch::arm64::InvokeSupervisorModeThread(argument, entrypoint)). 
*/ + b _ZN3ams4kern4arch5arm6426InvokeSupervisorModeThreadEmm From 5c6362c56df47a5cc893bc03f117815047ab92d3 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 01:40:28 -0700 Subject: [PATCH 20/35] ro: support NROs with read-only first page --- .../include/stratosphere/ro/ro_types.hpp | 17 +++++++++++++++-- stratosphere/ro/source/impl/ro_nro_utils.cpp | 8 ++++++-- stratosphere/ro/source/impl/ro_nro_utils.hpp | 2 +- stratosphere/ro/source/impl/ro_service_impl.cpp | 8 +++++--- 4 files changed, 27 insertions(+), 8 deletions(-) diff --git a/libraries/libstratosphere/include/stratosphere/ro/ro_types.hpp b/libraries/libstratosphere/include/stratosphere/ro/ro_types.hpp index 886b771da7..4a1ce2684e 100644 --- a/libraries/libstratosphere/include/stratosphere/ro/ro_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/ro/ro_types.hpp @@ -135,14 +135,15 @@ namespace ams::ro { class NroHeader { public: static constexpr u32 Magic = util::FourCC<'N','R','O','0'>::Code; + static constexpr u32 FlagAlignedHeader = 1; private: u32 m_entrypoint_insn; u32 m_mod_offset; u8 m_reserved_08[0x8]; u32 m_magic; - u8 m_reserved_14[0x4]; + u8 m_version; u32 m_size; - u8 m_reserved_1C[0x4]; + u32 m_flags; u32 m_text_offset; u32 m_text_size; u32 m_ro_offset; @@ -158,10 +159,22 @@ namespace ams::ro { return m_magic == Magic; } + u32 GetVersion() const { + return m_version; + } + u32 GetSize() const { return m_size; } + u32 GetFlags() const { + return m_flags; + } + + bool IsAlignedHeader() const { + return m_flags & FlagAlignedHeader; + } + u32 GetTextOffset() const { return m_text_offset; } diff --git a/stratosphere/ro/source/impl/ro_nro_utils.cpp b/stratosphere/ro/source/impl/ro_nro_utils.cpp index 588a263879..3f01a1c999 100644 --- a/stratosphere/ro/source/impl/ro_nro_utils.cpp +++ b/stratosphere/ro/source/impl/ro_nro_utils.cpp @@ -51,11 +51,15 @@ namespace ams::ro::impl { R_SUCCEED(); } - Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size) { - const u64 rx_offset = 0; + Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size, bool is_aligned_header) { + const u64 rx_offset = is_aligned_header ? os::MemoryPageSize : 0; const u64 ro_offset = rx_offset + rx_size; const u64 rw_offset = ro_offset + ro_size; + if (is_aligned_header) { + R_TRY(os::SetProcessMemoryPermission(process_handle, base_address, os::MemoryPageSize, os::MemoryPermission_ReadOnly)); + } + R_TRY(os::SetProcessMemoryPermission(process_handle, base_address + rx_offset, rx_size, os::MemoryPermission_ReadExecute)); R_TRY(os::SetProcessMemoryPermission(process_handle, base_address + ro_offset, ro_size, os::MemoryPermission_ReadOnly)); R_TRY(os::SetProcessMemoryPermission(process_handle, base_address + rw_offset, rw_size, os::MemoryPermission_ReadWrite)); diff --git a/stratosphere/ro/source/impl/ro_nro_utils.hpp b/stratosphere/ro/source/impl/ro_nro_utils.hpp index df018c8720..9ea2f7b32c 100644 --- a/stratosphere/ro/source/impl/ro_nro_utils.hpp +++ b/stratosphere/ro/source/impl/ro_nro_utils.hpp @@ -21,7 +21,7 @@ namespace ams::ro::impl { /* Utilities for working with NROs. 
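   (Editor's aside, illustrative and not part of the patch: when
   NroHeader::FlagAlignedHeader is set, the first page holds only the header
   and stays read-only, so every segment shifts one page; the offsets then
   accumulate exactly as in SetNroPerms below. A hypothetical standalone
   sketch:

       struct NroSegmentOffsets { u64 rx, ro, rw; };
       constexpr NroSegmentOffsets GetNroSegmentOffsets(u64 rx_size, u64 ro_size, bool is_aligned_header) {
           const u64 rx = is_aligned_header ? os::MemoryPageSize : 0;
           return { rx, rx + rx_size, rx + rx_size + ro_size };
       }

   End of aside.)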
*/ Result MapNro(u64 *out_base_address, os::NativeHandle process_handle, u64 nro_heap_address, u64 nro_heap_size, u64 bss_heap_address, u64 bss_heap_size); - Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size); + Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size, bool is_aligned_header); Result UnmapNro(os::NativeHandle process_handle, u64 base_address, u64 nro_heap_address, u64 nro_heap_size, u64 bss_heap_address, u64 bss_heap_size); } \ No newline at end of file diff --git a/stratosphere/ro/source/impl/ro_service_impl.cpp b/stratosphere/ro/source/impl/ro_service_impl.cpp index 0be672f37d..0eee275eb8 100644 --- a/stratosphere/ro/source/impl/ro_service_impl.cpp +++ b/stratosphere/ro/source/impl/ro_service_impl.cpp @@ -247,7 +247,7 @@ namespace ams::ro::impl { R_THROW(ro::ResultNotAuthorized()); } - Result ValidateNro(ModuleId *out_module_id, u64 *out_rx_size, u64 *out_ro_size, u64 *out_rw_size, u64 base_address, u64 expected_nro_size, u64 expected_bss_size) { + Result ValidateNro(ModuleId *out_module_id, u64 *out_rx_size, u64 *out_ro_size, u64 *out_rw_size, bool *out_aligned_header, u64 base_address, u64 expected_nro_size, u64 expected_bss_size) { /* Map the NRO. */ void *mapped_memory = nullptr; R_TRY_CATCH(os::MapProcessMemory(std::addressof(mapped_memory), m_process_handle, base_address, expected_nro_size, ro::impl::GenerateSecureRandom)) { @@ -306,6 +306,7 @@ namespace ams::ro::impl { *out_rx_size = text_size; *out_ro_size = ro_size; *out_rw_size = rw_size; + *out_aligned_header = header->IsAlignedHeader(); R_SUCCEED(); } @@ -557,10 +558,11 @@ namespace ams::ro::impl { /* Validate the NRO (parsing region extents). */ u64 rx_size = 0, ro_size = 0, rw_size = 0; - R_TRY(context->ValidateNro(std::addressof(nro_info->module_id), std::addressof(rx_size), std::addressof(ro_size), std::addressof(rw_size), nro_info->base_address, nro_size, bss_size)); + bool aligned_header = false; + R_TRY(context->ValidateNro(std::addressof(nro_info->module_id), std::addressof(rx_size), std::addressof(ro_size), std::addressof(rw_size), std::addressof(aligned_header), nro_info->base_address, nro_size, bss_size)); /* Set NRO perms. 
*/ - R_TRY(SetNroPerms(context->GetProcessHandle(), nro_info->base_address, rx_size, ro_size, rw_size + bss_size)); + R_TRY(SetNroPerms(context->GetProcessHandle(), nro_info->base_address, rx_size, ro_size, rw_size + bss_size, aligned_header)); context->SetNroInfoInUse(nro_info, true); nro_info->code_size = rx_size + ro_size; From a6e14c5989236709b6c60a72685e6b1e56426de3 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 02:37:18 -0700 Subject: [PATCH 21/35] ns_mitm: update to support new 19.0.0 command --- .../include/stratosphere/hos/hos_types.hpp | 1 + .../ams_mitm/source/ns_mitm/ns_shim.c | 27 +++++++++++++++++++ .../ams_mitm/source/ns_mitm/ns_shim.h | 1 + .../source/ns_mitm/ns_web_mitm_service.cpp | 5 ++++ .../source/ns_mitm/ns_web_mitm_service.hpp | 10 ++++--- 5 files changed, 40 insertions(+), 4 deletions(-) diff --git a/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp b/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp index 597cc0876d..a096237a88 100644 --- a/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp @@ -84,6 +84,7 @@ namespace ams::hos { Version_17_0_1 = ::ams::TargetFirmware_17_0_1, Version_18_0_0 = ::ams::TargetFirmware_18_0_0, Version_18_1_0 = ::ams::TargetFirmware_18_1_0, + Version_19_0_0 = ::ams::TargetFirmware_19_0_0, Version_Current = ::ams::TargetFirmware_Current, diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_shim.c b/stratosphere/ams_mitm/source/ns_mitm/ns_shim.c index 4b6d1b8cd7..569ac2f619 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_shim.c +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_shim.c @@ -48,6 +48,29 @@ static Result _nsGetApplicationContentPath(Service *s, void* out, size_t out_siz ); } +static Result _nsGetApplicationContentPath2(Service *s, void* out_path, size_t out_size, u64* out_program_id, u8 *out_attr, u64 app_id, NcmContentType content_type) { + const struct { + u8 content_type; + u64 app_id; + } in = { content_type, app_id }; + + struct { + u8 attr; + u64 program_id; + } out; + + Result rc = serviceDispatchInOut(s, 2524, in, out, + .buffer_attrs = { SfBufferAttr_HipcMapAlias | SfBufferAttr_Out }, + .buffers = { { out_path, out_size } }, + ); + if (R_SUCCEEDED(rc)) { + *out_program_id = out.program_id; + *out_attr = out.attr; + } + + return rc; +} + static Result _nsResolveApplicationContentPath(Service* s, u64 app_id, NcmContentType content_type) { const struct { u8 content_type; @@ -93,6 +116,10 @@ Result nswebGetRunningApplicationProgramId(NsDocumentInterface* doc, u64* out_pr return _nsGetRunningApplicationProgramId(&doc->s, out_program_id, app_id); } +Result nswebGetApplicationContentPath2(NsDocumentInterface* doc, void* out, size_t out_size, u64* out_program_id, u8 *out_attr, u64 app_id, NcmContentType content_type) { + return _nsGetApplicationContentPath2(&doc->s, out, out_size, out_program_id, out_attr, app_id, content_type); +} + void nsDocumentInterfaceClose(NsDocumentInterface* doc) { serviceClose(&doc->s); } diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_shim.h b/stratosphere/ams_mitm/source/ns_mitm/ns_shim.h index afb8125d30..9cfd2774d1 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_shim.h +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_shim.h @@ -25,6 +25,7 @@ Result nsamGetRunningApplicationProgramIdFwd(Service* s, u64* out_program_id, u6 Result nswebGetApplicationContentPath(NsDocumentInterface* doc, void* out, size_t out_size, u8 *out_attr, u64 app_id, NcmContentType 
content_type); Result nswebResolveApplicationContentPath(NsDocumentInterface* doc, u64 app_id, NcmContentType content_type); Result nswebGetRunningApplicationProgramId(NsDocumentInterface* doc, u64* out_program_id, u64 app_id); +Result nswebGetApplicationContentPath2(NsDocumentInterface* doc, void* out, size_t out_size, u64* out_program_id, u8 *out_attr, u64 app_id, NcmContentType content_type); void nsDocumentInterfaceClose(NsDocumentInterface* doc); diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp index 9568f91084..5613d267e3 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp @@ -38,6 +38,11 @@ namespace ams::mitm::ns { R_RETURN(nswebGetRunningApplicationProgramId(m_srv.get(), reinterpret_cast(out.GetPointer()), static_cast(application_id))); } + Result NsDocumentService::GetApplicationContentPath2(const sf::OutBuffer &out_path, sf::Out out_program_id, sf::Out out_attr, ncm::ProgramId application_id, u8 content_type) { + static_assert(sizeof(*out_attr.GetPointer()) == sizeof(u8)); + R_RETURN(nswebGetApplicationContentPath2(m_srv.get(), out_path.GetPointer(), out_path.GetSize(), reinterpret_cast(out_program_id.GetPointer()), reinterpret_cast(out_attr.GetPointer()), static_cast(application_id), static_cast(content_type))); + } + Result NsWebMitmService::GetDocumentInterface(sf::Out> out) { /* Open a document interface. */ NsDocumentInterface doc; diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp index b08669e592..e9adbe46d3 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp @@ -18,10 +18,11 @@ #include "ns_shim.h" -#define AMS_NS_DOCUMENT_MITM_INTERFACE_INFO(C, H) \ - AMS_SF_METHOD_INFO(C, H, 21, Result, GetApplicationContentPath, (const sf::OutBuffer &out_path, sf::Out out_attr, ncm::ProgramId application_id, u8 content_type), (out_path, out_attr, application_id, content_type)) \ - AMS_SF_METHOD_INFO(C, H, 23, Result, ResolveApplicationContentPath, (ncm::ProgramId application_id, u8 content_type), (application_id, content_type)) \ - AMS_SF_METHOD_INFO(C, H, 92, Result, GetRunningApplicationProgramId, (sf::Out out, ncm::ProgramId application_id), (out, application_id), hos::Version_6_0_0) +#define AMS_NS_DOCUMENT_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 21, Result, GetApplicationContentPath, (const sf::OutBuffer &out_path, sf::Out out_attr, ncm::ProgramId application_id, u8 content_type), (out_path, out_attr, application_id, content_type)) \ + AMS_SF_METHOD_INFO(C, H, 23, Result, ResolveApplicationContentPath, (ncm::ProgramId application_id, u8 content_type), (application_id, content_type)) \ + AMS_SF_METHOD_INFO(C, H, 92, Result, GetRunningApplicationProgramId, (sf::Out out, ncm::ProgramId application_id), (out, application_id), hos::Version_6_0_0) \ + AMS_SF_METHOD_INFO(C, H, 2524, Result, GetApplicationContentPath2, (const sf::OutBuffer &out_path, sf::Out out_program_id, sf::Out out_attr, ncm::ProgramId application_id, u8 content_type), (out_path, out_program_id, out_attr, application_id, content_type), hos::Version_19_0_0) AMS_SF_DEFINE_INTERFACE(ams::mitm::ns::impl, IDocumentInterface, AMS_NS_DOCUMENT_MITM_INTERFACE_INFO, 0x0F9B1C00) @@ -47,6 +48,7 @@ namespace ams::mitm::ns { Result GetApplicationContentPath(const 
sf::OutBuffer &out_path, sf::Out out_attr, ncm::ProgramId application_id, u8 content_type); Result ResolveApplicationContentPath(ncm::ProgramId application_id, u8 content_type); Result GetRunningApplicationProgramId(sf::Out out, ncm::ProgramId application_id); + Result GetApplicationContentPath2(const sf::OutBuffer &out_path, sf::Out out_program_id, sf::Out out_attr, ncm::ProgramId application_id, u8 content_type); }; static_assert(impl::IsIDocumentInterface); From 49763aee9298224d73f941ebc92d1a1793476e2f Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 02:44:19 -0700 Subject: [PATCH 22/35] pm: add new 19.0.0 commands This is functionally correct, but I have no idea what these are meant to represent. These functions are completely unused on NX. --- .../stratosphere/pm/impl/pm_boot_mode_interface.hpp | 8 +++++--- .../libvapours/include/vapours/results/pm_results.hpp | 1 + stratosphere/pm/source/pm_boot_mode_service.cpp | 11 +++++++++++ stratosphere/pm/source/pm_boot_mode_service.hpp | 2 ++ 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp index a34f2272d2..66faf57e2f 100644 --- a/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp +++ b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp @@ -19,8 +19,10 @@ #include #include -#define AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO(C, H) \ - AMS_SF_METHOD_INFO(C, H, 0, void, GetBootMode, (sf::Out out), (out)) \ - AMS_SF_METHOD_INFO(C, H, 1, void, SetMaintenanceBoot, (), ()) +#define AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, void, GetBootMode, (sf::Out out), (out)) \ + AMS_SF_METHOD_INFO(C, H, 1, void, SetMaintenanceBoot, (), ()) \ + AMS_SF_METHOD_INFO(C, H, 2, void, GetUnknown, (sf::Out out), (out)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, SetUnknown, (u32 val), (val)) AMS_SF_DEFINE_INTERFACE(ams::pm::impl, IBootModeInterface, AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO, 0x96D01649) diff --git a/libraries/libvapours/include/vapours/results/pm_results.hpp b/libraries/libvapours/include/vapours/results/pm_results.hpp index 4c9e51ad5c..4e4c0e24b7 100644 --- a/libraries/libvapours/include/vapours/results/pm_results.hpp +++ b/libraries/libvapours/include/vapours/results/pm_results.hpp @@ -27,5 +27,6 @@ namespace ams::pm { R_DEFINE_ERROR_RESULT(DebugHookInUse, 4); R_DEFINE_ERROR_RESULT(ApplicationRunning, 5); R_DEFINE_ERROR_RESULT(InvalidSize, 6); + R_DEFINE_ERROR_RESULT(Unknown7, 7); } diff --git a/stratosphere/pm/source/pm_boot_mode_service.cpp b/stratosphere/pm/source/pm_boot_mode_service.cpp index ff389fe75f..a1a762d34b 100644 --- a/stratosphere/pm/source/pm_boot_mode_service.cpp +++ b/stratosphere/pm/source/pm_boot_mode_service.cpp @@ -22,6 +22,7 @@ namespace ams::pm { /* Global bootmode. 
*/ constinit BootMode g_boot_mode = BootMode::Normal; + constinit u32 g_unknown = 0; } @@ -47,4 +48,14 @@ namespace ams::pm { pm::bm::SetMaintenanceBoot(); } + void BootModeService::GetUnknown(sf::Out out) { + out.SetValue(g_unknown); + } + + Result BootModeService::SetUnknown(u32 val) { + R_UNLESS(val <= 3, pm::ResultUnknown7()); + g_unknown = val; + R_SUCCEED(); + } + } diff --git a/stratosphere/pm/source/pm_boot_mode_service.hpp b/stratosphere/pm/source/pm_boot_mode_service.hpp index c650e9b471..b47536ed79 100644 --- a/stratosphere/pm/source/pm_boot_mode_service.hpp +++ b/stratosphere/pm/source/pm_boot_mode_service.hpp @@ -22,6 +22,8 @@ namespace ams::pm { public: void GetBootMode(sf::Out out); void SetMaintenanceBoot(); + void GetUnknown(sf::Out out); + Result SetUnknown(u32 val); }; static_assert(pm::impl::IsIBootModeInterface); From 0c4ae5573153166e6eb026cc0da1bb41d77e194b Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 03:45:09 -0700 Subject: [PATCH 23/35] ldr: ProgramInfo is 0x410 now, and fix debug flags for hbl --- .../include/stratosphere/ldr/ldr_types.hpp | 3 ++- .../source/ldr/ldr_pm_api.os.horizon.cpp | 2 ++ stratosphere/loader/source/ldr_capabilities.cpp | 14 ++++++++++++++ stratosphere/loader/source/ldr_capabilities.hpp | 2 ++ stratosphere/loader/source/ldr_main.cpp | 2 +- stratosphere/loader/source/ldr_meta.cpp | 4 ++++ 6 files changed, 25 insertions(+), 2 deletions(-) diff --git a/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp b/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp index e81be6b7d6..7d1a94aa12 100644 --- a/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/ldr/ldr_types.hpp @@ -34,9 +34,10 @@ namespace ams::ldr { u32 aci_sac_size; u32 acid_fac_size; u32 aci_fah_size; + u8 unused_20[0x10]; u8 ac_buffer[0x3E0]; }; - static_assert(util::is_pod::value && sizeof(ProgramInfo) == 0x400, "ProgramInfo definition!"); + static_assert(util::is_pod::value && sizeof(ProgramInfo) == 0x410, "ProgramInfo definition!"); enum ProgramInfoFlag { ProgramInfoFlag_SystemModule = (0 << 0), diff --git a/libraries/libstratosphere/source/ldr/ldr_pm_api.os.horizon.cpp b/libraries/libstratosphere/source/ldr/ldr_pm_api.os.horizon.cpp index 8441977f5a..2e00417dee 100644 --- a/libraries/libstratosphere/source/ldr/ldr_pm_api.os.horizon.cpp +++ b/libraries/libstratosphere/source/ldr/ldr_pm_api.os.horizon.cpp @@ -24,6 +24,7 @@ namespace ams::ldr::pm { } Result GetProgramInfo(ProgramInfo *out, const ncm::ProgramLocation &loc) { + static_assert(sizeof(*out) == sizeof(LoaderProgramInfo)); R_RETURN(ldrPmGetProgramInfo(reinterpret_cast(std::addressof(loc)), reinterpret_cast(out))); } @@ -42,6 +43,7 @@ namespace ams::ldr::pm { Result AtmosphereGetProgramInfo(ProgramInfo *out, cfg::OverrideStatus *out_status, const ncm::ProgramLocation &loc) { static_assert(sizeof(*out_status) == sizeof(CfgOverrideStatus), "CfgOverrideStatus definition!"); + static_assert(sizeof(*out) == sizeof(LoaderProgramInfo)); R_RETURN(ldrPmAtmosphereGetProgramInfo(reinterpret_cast(out), reinterpret_cast(out_status), reinterpret_cast(std::addressof(loc)))); } diff --git a/stratosphere/loader/source/ldr_capabilities.cpp b/stratosphere/loader/source/ldr_capabilities.cpp index cd93d855f5..b3bc25e652 100644 --- a/stratosphere/loader/source/ldr_capabilities.cpp +++ b/stratosphere/loader/source/ldr_capabilities.cpp @@ -425,6 +425,20 @@ namespace ams::ldr { } } + void FixDebugCapabilityForHbl(util::BitPack32 *kac, 
size_t count) { + for (size_t i = 0; i < count; ++i) { + const auto cap = kac[i]; + switch (GetCapabilityId(cap)) { + case CapabilityId::DebugFlags: + /* 19.0.0+ disallows more than one flag set; we are always DebugMode for kernel, so ForceDebug is the most powerful/flexible flag to set. */ + kac[i] = CapabilityDebugFlags::Encode(false, false, true); + break; + default: + break; + } + } + } + void PreProcessCapability(util::BitPack32 *kac, size_t count) { for (size_t i = 0; i < count; ++i) { const auto cap = kac[i]; diff --git a/stratosphere/loader/source/ldr_capabilities.hpp b/stratosphere/loader/source/ldr_capabilities.hpp index 0a91b23cc1..21748a7fb3 100644 --- a/stratosphere/loader/source/ldr_capabilities.hpp +++ b/stratosphere/loader/source/ldr_capabilities.hpp @@ -23,6 +23,8 @@ namespace ams::ldr { u16 MakeProgramInfoFlag(const util::BitPack32 *kac, size_t count); void UpdateProgramInfoFlag(u16 flags, util::BitPack32 *kac, size_t count); + void FixDebugCapabilityForHbl(util::BitPack32 *kac, size_t count); + void PreProcessCapability(util::BitPack32 *kac, size_t count); } diff --git a/stratosphere/loader/source/ldr_main.cpp b/stratosphere/loader/source/ldr_main.cpp index 5b6001f93a..5e4d8ca064 100644 --- a/stratosphere/loader/source/ldr_main.cpp +++ b/stratosphere/loader/source/ldr_main.cpp @@ -46,7 +46,7 @@ namespace ams { namespace { struct ServerOptions { - static constexpr size_t PointerBufferSize = 0x400; + static constexpr size_t PointerBufferSize = 0x420; static constexpr size_t MaxDomains = 0; static constexpr size_t MaxDomainObjects = 0; static constexpr bool CanDeferInvokeRequest = false; diff --git a/stratosphere/loader/source/ldr_meta.cpp b/stratosphere/loader/source/ldr_meta.cpp index ee43aeedc0..b1d1a85ad6 100644 --- a/stratosphere/loader/source/ldr_meta.cpp +++ b/stratosphere/loader/source/ldr_meta.cpp @@ -252,6 +252,10 @@ namespace ams::ldr { meta->npdm->main_thread_priority = HblMainThreadPriorityApplet; } } + + /* Fix the debug capabilities, to prevent needing a hbl recompilation. */ + FixDebugCapabilityForHbl(static_cast(meta->acid_kac), meta->acid->kac_size / sizeof(util::BitPack32)); + FixDebugCapabilityForHbl(static_cast(meta->aci_kac), meta->aci->kac_size / sizeof(util::BitPack32)); } else if (hos::GetVersion() >= hos::Version_10_0_0) { /* If storage id is none, there is no base code filesystem, and thus it is impossible for us to validate. */ /* However, if we're an application, we are guaranteed a base code filesystem. 
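   (Editor's aside, illustrative and not part of the patch: per the
   FixDebugCapabilityForHbl comment earlier in this commit, 19.0.0+ rejects
   DebugFlags capabilities with more than one flag set, which is why hbl is
   re-encoded to ForceDebug alone. A hypothetical standalone restatement of
   the rule:

       constexpr bool IsValidDebugFlagCombination(bool allow_debug, bool force_debug_prod, bool force_debug) {
           /* 19.0.0+ allows at most one of the three debug flags to be set. */
           return (int(allow_debug) + int(force_debug_prod) + int(force_debug)) <= 1;
       }

   End of aside.)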
*/ From 197ffa1dbc58b7ee41ef65a219ee94fa0e50707d Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 05:31:57 -0700 Subject: [PATCH 24/35] kern: start KPageTable(Impl) refactor, use array-with-levels for KPageTableImpl --- .../arch/arm64/kern_k_page_table_entry.hpp | 13 +- .../arch/arm64/kern_k_page_table_impl.hpp | 34 +- .../arch/arm64/kern_k_page_table_impl.cpp | 394 ++++++++---------- 3 files changed, 208 insertions(+), 233 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp index 93925e3fa1..8941928edf 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp @@ -170,9 +170,17 @@ namespace ams::kern::arch::arm64 { constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; } constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; } constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; } + + constexpr ALWAYS_INLINE u64 GetTestTableMask() const { return (m_attributes & ExtensionFlag_TestTableMask); } + constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; } + constexpr ALWAYS_INLINE bool IsPage() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; } constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; } constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; } + + constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { return this->SelectBits(12, 36); } + + constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; } constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; } constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; } @@ -196,10 +204,13 @@ namespace ams::kern::arch::arm64 { return (m_attributes & BaseMaskForMerge) == attr; } - constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const { + constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const { return m_attributes; } + constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const { + return m_attributes; + } protected: constexpr ALWAYS_INLINE u64 GetRawAttributes() const { return m_attributes; diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp index e5df6defb5..8616ac0ddb 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp @@ -37,10 +37,17 @@ namespace ams::kern::arch::arm64 { constexpr bool IsTailMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadTail) != 0; } }; + enum EntryLevel : u32 { + EntryLevel_L3 = 0, + EntryLevel_L2 = 1, + EntryLevel_L1 = 2, + EntryLevel_Count = 3, + }; + struct TraversalContext { - const L1PageTableEntry *l1_entry; - const L2PageTableEntry *l2_entry; - const L3PageTableEntry *l3_entry; + const PageTableEntry *level_entries[EntryLevel_Count]; + EntryLevel level; + bool is_contiguous; }; private: static 
constexpr size_t PageBits = util::CountTrailingZeros(PageSize); @@ -53,16 +60,26 @@ namespace ams::kern::arch::arm64 { return (value >> Offset) & ((1ul << Count) - 1); } + static constexpr ALWAYS_INLINE u64 GetBits(u64 value, size_t offset, size_t count) { + return (value >> offset) & ((1ul << count) - 1); + } + template - constexpr ALWAYS_INLINE u64 SelectBits(u64 value) { + static constexpr ALWAYS_INLINE u64 SelectBits(u64 value) { return value & (((1ul << Count) - 1) << Offset); } + static constexpr ALWAYS_INLINE u64 SelectBits(u64 value, size_t offset, size_t count) { + return value & (((1ul << count) - 1) << offset); + } + static constexpr ALWAYS_INLINE uintptr_t GetL0Index(KProcessAddress addr) { return GetBits(GetInteger(addr)); } static constexpr ALWAYS_INLINE uintptr_t GetL1Index(KProcessAddress addr) { return GetBits(GetInteger(addr)); } static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits(GetInteger(addr)); } static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits(GetInteger(addr)); } + static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); } + static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); } static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); } static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); } @@ -70,13 +87,16 @@ namespace ams::kern::arch::arm64 { static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); } static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); } + static constexpr ALWAYS_INLINE uintptr_t GetBlock(const PageTableEntry *pte, EntryLevel level) { return SelectBits(pte->GetRawAttributesUnsafe(), PageBits + LevelBits * level, LevelBits * (NumLevels + 1 - level)); } + static constexpr ALWAYS_INLINE uintptr_t GetOffset(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), 0, PageBits + LevelBits * level); } + static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) { return KMemoryLayout::GetLinearVirtualAddress(addr); } - ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const; - ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const; - ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const; + //ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const; + //ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const; + //ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext 
*out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const; private: L1PageTableEntry *m_table; bool m_is_kernel; diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index 280498f782..5bc775f664 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -33,103 +33,98 @@ namespace ams::kern::arch::arm64 { return m_table; } - bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const { - /* Set the L3 entry. */ - out_context->l3_entry = l3_entry; - - if (l3_entry->IsBlock()) { - /* Set the output entry. */ - out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1)); - if (l3_entry->IsContiguous()) { - out_entry->block_size = L3ContiguousBlockSize; - } else { - out_entry->block_size = L3BlockSize; - } - out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits(); - out_entry->attr = 0; - - return true; - } else { - out_entry->phys_addr = Null; - out_entry->block_size = L3BlockSize; - out_entry->sw_reserved_bits = 0; - out_entry->attr = 0; - return false; - } - } - - bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const { - /* Set the L2 entry. */ - out_context->l2_entry = l2_entry; - - if (l2_entry->IsBlock()) { - /* Set the output entry. */ - out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1)); - if (l2_entry->IsContiguous()) { - out_entry->block_size = L2ContiguousBlockSize; - } else { - out_entry->block_size = L2BlockSize; - } - out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits(); - out_entry->attr = 0; - - /* Set the output context. */ - out_context->l3_entry = nullptr; - return true; - } else if (l2_entry->IsTable()) { - return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr); - } else { - out_entry->phys_addr = Null; - out_entry->block_size = L2BlockSize; - out_entry->sw_reserved_bits = 0; - out_entry->attr = 0; - - out_context->l3_entry = nullptr; - return false; - } - } - - bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const { - /* Set the L1 entry. */ - out_context->l1_entry = l1_entry; - - if (l1_entry->IsBlock()) { - /* Set the output entry. */ - out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1)); - if (l1_entry->IsContiguous()) { - out_entry->block_size = L1ContiguousBlockSize; - } else { - out_entry->block_size = L1BlockSize; - } - out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits(); - - /* Set the output context. 
*/ - out_context->l2_entry = nullptr; - out_context->l3_entry = nullptr; - return true; - } else if (l1_entry->IsTable()) { - return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr); - } else { - out_entry->phys_addr = Null; - out_entry->block_size = L1BlockSize; - out_entry->sw_reserved_bits = 0; - out_entry->attr = 0; - - out_context->l2_entry = nullptr; - out_context->l3_entry = nullptr; - return false; - } - } + // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const { + // /* Set the L3 entry. */ + // out_context->l3_entry = l3_entry; + // + // if (l3_entry->IsBlock()) { + // /* Set the output entry. */ + // out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1)); + // if (l3_entry->IsContiguous()) { + // out_entry->block_size = L3ContiguousBlockSize; + // } else { + // out_entry->block_size = L3BlockSize; + // } + // out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits(); + // out_entry->attr = 0; + // + // return true; + // } else { + // out_entry->phys_addr = Null; + // out_entry->block_size = L3BlockSize; + // out_entry->sw_reserved_bits = 0; + // out_entry->attr = 0; + // return false; + // } + // } + // + // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const { + // /* Set the L2 entry. */ + // out_context->l2_entry = l2_entry; + // + // if (l2_entry->IsBlock()) { + // /* Set the output entry. */ + // out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1)); + // if (l2_entry->IsContiguous()) { + // out_entry->block_size = L2ContiguousBlockSize; + // } else { + // out_entry->block_size = L2BlockSize; + // } + // out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits(); + // out_entry->attr = 0; + // + // /* Set the output context. */ + // out_context->l3_entry = nullptr; + // return true; + // } else if (l2_entry->IsTable()) { + // return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr); + // } else { + // out_entry->phys_addr = Null; + // out_entry->block_size = L2BlockSize; + // out_entry->sw_reserved_bits = 0; + // out_entry->attr = 0; + // + // out_context->l3_entry = nullptr; + // return false; + // } + // } + // + // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const { + // /* Set the L1 entry. */ + // out_context->level_entries[EntryLevel_L1] = l1_entry; + // + // if (l1_entry->IsBlock()) { + // /* Set the output entry. */ + // out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1)); + // if (l1_entry->IsContiguous()) { + // out_entry->block_size = L1ContiguousBlockSize; + // } else { + // out_entry->block_size = L1BlockSize; + // } + // out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits(); + // + // /* Set the output context. 
*/ + // out_context->l2_entry = nullptr; + // out_context->l3_entry = nullptr; + // return true; + // } else if (l1_entry->IsTable()) { + // return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr); + // } else { + // out_entry->phys_addr = Null; + // out_entry->block_size = L1BlockSize; + // out_entry->sw_reserved_bits = 0; + // out_entry->attr = 0; + // + // out_context->l2_entry = nullptr; + // out_context->l3_entry = nullptr; + // return false; + // } + // } bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const { /* Setup invalid defaults. */ - out_entry->phys_addr = Null; - out_entry->block_size = L1BlockSize; - out_entry->sw_reserved_bits = 0; - out_entry->attr = 0; - out_context->l1_entry = m_table + m_num_entries; - out_context->l2_entry = nullptr; - out_context->l3_entry = nullptr; + *out_entry = {}; + *out_context = {}; /* Validate that we can read the actual entry. */ const size_t l0_index = GetL0Index(address); @@ -146,125 +141,79 @@ namespace ams::kern::arch::arm64 { } } - /* Extract the entry. */ - const bool valid = this->ExtractL1Entry(out_entry, out_context, this->GetL1Entry(address), address); - - /* Update the context for next traversal. */ - switch (out_entry->block_size) { - case L1ContiguousBlockSize: - out_context->l1_entry += (L1ContiguousBlockSize / L1BlockSize) - GetContiguousL1Offset(address) / L1BlockSize; - break; - case L1BlockSize: - out_context->l1_entry += 1; - break; - case L2ContiguousBlockSize: - out_context->l1_entry += 1; - out_context->l2_entry += (L2ContiguousBlockSize / L2BlockSize) - GetContiguousL2Offset(address) / L2BlockSize; - break; - case L2BlockSize: - out_context->l1_entry += 1; - out_context->l2_entry += 1; - break; - case L3ContiguousBlockSize: - out_context->l1_entry += 1; - out_context->l2_entry += 1; - out_context->l3_entry += (L3ContiguousBlockSize / L3BlockSize) - GetContiguousL3Offset(address) / L3BlockSize; - break; - case L3BlockSize: - out_context->l1_entry += 1; - out_context->l2_entry += 1; - out_context->l3_entry += 1; - break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + /* Get the L1 entry, and check if it's a table. */ + out_context->level_entries[EntryLevel_L1] = this->GetL1Entry(address); + if (out_context->level_entries[EntryLevel_L1]->IsMappedTable()) { + /* Get the L2 entry, and check if it's a table. */ + out_context->level_entries[EntryLevel_L2] = this->GetL2EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L1]->GetTable()), address); + if (out_context->level_entries[EntryLevel_L2]->IsMappedTable()) { + /* Get the L3 entry. */ + out_context->level_entries[EntryLevel_L3] = this->GetL3EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L2]->GetTable()), address); + + /* It's either a page or not. */ + out_context->level = EntryLevel_L3; + } else { + /* Not a L2 table, so possibly an L2 block. */ + out_context->level = EntryLevel_L2; + } + } else { + /* Not a L1 table, so possibly an L1 block. */ + out_context->level = EntryLevel_L1; } - return valid; + /* Determine other fields. 
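   The reported block size follows mechanically from the level and the
   contiguous bit: each level adds LevelBits (9) to the shift, and a
   contiguous run of 16 entries adds 4 more. A minimal standalone sketch
   of the same computation (BlockSizeOf is a hypothetical helper, not part
   of this patch), assuming 4KB pages:

       size_t BlockSizeOf(u32 level, bool contiguous) {
           // level 0 (L3): 4KB, level 1 (L2): 2MB, level 2 (L1): 1GB;
           // a contiguous run is always 16 entries, i.e. 16x larger.
           return static_cast<size_t>(1) << (12 + 9 * level + (contiguous ? 4 : 0));
       }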
*/ + const auto *pte = out_context->level_entries[out_context->level]; + + out_context->is_contiguous = pte->IsContiguous(); + + out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits(); + out_entry->attr = 0; + out_entry->phys_addr = this->GetBlock(pte, out_context->level) + this->GetOffset(address, out_context->level); + out_entry->block_size = static_cast(1) << (PageBits + LevelBits * out_context->level + 4 * out_context->is_contiguous); + + return out_context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock(); } bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const { - bool valid = false; - - /* Check if we're not at the end of an L3 table. */ - if (!util::IsAligned(reinterpret_cast(context->l3_entry), PageSize)) { - valid = this->ExtractL3Entry(out_entry, context, context->l3_entry, Null); - - switch (out_entry->block_size) { - case L3ContiguousBlockSize: - context->l3_entry += (L3ContiguousBlockSize / L3BlockSize); - break; - case L3BlockSize: - context->l3_entry += 1; - break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); - } - } else if (!util::IsAligned(reinterpret_cast(context->l2_entry), PageSize)) { - /* We're not at the end of an L2 table. */ - valid = this->ExtractL2Entry(out_entry, context, context->l2_entry, Null); - - switch (out_entry->block_size) { - case L2ContiguousBlockSize: - context->l2_entry += (L2ContiguousBlockSize / L2BlockSize); - break; - case L2BlockSize: - context->l2_entry += 1; - break; - case L3ContiguousBlockSize: - context->l2_entry += 1; - context->l3_entry += (L3ContiguousBlockSize / L3BlockSize); - break; - case L3BlockSize: - context->l2_entry += 1; - context->l3_entry += 1; - break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); - } - } else { - /* We need to update the l1 entry. */ - const size_t l1_index = context->l1_entry - m_table; - if (l1_index < m_num_entries) { - valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null); - } else { - /* Invalid, end traversal. */ - out_entry->phys_addr = Null; - out_entry->block_size = L1BlockSize; - out_entry->sw_reserved_bits = 0; - out_entry->attr = 0; - context->l1_entry = m_table + m_num_entries; - context->l2_entry = nullptr; - context->l3_entry = nullptr; + /* Advance entry. */ + + auto *cur_pte = context->level_entries[context->level]; + auto *next_pte = reinterpret_cast(context->is_contiguous ? util::AlignDown(reinterpret_cast(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast(cur_pte) + sizeof(PageTableEntry)); + + /* Set the pte. */ + context->level_entries[context->level] = next_pte; + + /* Advance appropriately. */ + while (context->level < EntryLevel_L1 && util::IsAligned(reinterpret_cast(context->level_entries[context->level]), PageSize)) { + /* Advance the above table by one entry. */ + context->level_entries[context->level + 1]++; + context->level = static_cast(util::ToUnderlying(context->level) + 1); + } + + /* Check if we've hit the end of the L1 table. 
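   (The advance loop above relies on every table being exactly one page of
   512 eight-byte entries: the first entry past the end of a table is the
   only one whose address is page-aligned, so pointer alignment alone tells
   the walker when to pop up a level. A hypothetical standalone form of
   that test, assuming 4KB pages:

       bool IsPastEndOfTable(const PageTableEntry *pte) {
           // 512 entries x 8 bytes = 4KB, so stepping off the end of a
           // table always lands on a page-aligned address.
           return util::IsAligned(reinterpret_cast<uintptr_t>(pte), PageSize);
       }

   The L1 table is the exception handled below, since it is bounded by
   m_num_entries rather than by page size.)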
*/ + if (context->level == EntryLevel_L1) { + if (context->level_entries[EntryLevel_L1] - static_cast(m_table) >= m_num_entries) { + *context = {}; + *out_entry = {}; return false; } + } - switch (out_entry->block_size) { - case L1ContiguousBlockSize: - context->l1_entry += (L1ContiguousBlockSize / L1BlockSize); - break; - case L1BlockSize: - context->l1_entry += 1; - break; - case L2ContiguousBlockSize: - context->l1_entry += 1; - context->l2_entry += (L2ContiguousBlockSize / L2BlockSize); - break; - case L2BlockSize: - context->l1_entry += 1; - context->l2_entry += 1; - break; - case L3ContiguousBlockSize: - context->l1_entry += 1; - context->l2_entry += 1; - context->l3_entry += (L3ContiguousBlockSize / L3BlockSize); - break; - case L3BlockSize: - context->l1_entry += 1; - context->l2_entry += 1; - context->l3_entry += 1; - break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); - } + /* We may have advanced to a new table, and if we have we should descend. */ + while (context->level > EntryLevel_L3 && context->level_entries[context->level]->IsMappedTable()) { + context->level_entries[context->level - 1] = GetPointer(GetPageTableVirtualAddress(context->level_entries[context->level]->GetTable())); + context->level = static_cast(util::ToUnderlying(context->level) - 1); } - return valid; + const auto *pte = context->level_entries[context->level]; + + context->is_contiguous = pte->IsContiguous(); + + out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits(); + out_entry->attr = 0; + out_entry->phys_addr = this->GetBlock(pte, context->level); + out_entry->block_size = static_cast(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous); + return context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock(); } bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { @@ -283,32 +232,27 @@ namespace ams::kern::arch::arm64 { } } - /* Try to get from l1 table. */ - const L1PageTableEntry *l1_entry = this->GetL1Entry(address); - if (l1_entry->IsBlock()) { - *out = l1_entry->GetBlock() + GetL1Offset(address); - return true; - } else if (!l1_entry->IsTable()) { - return false; - } - - /* Try to get from l2 table. */ - const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, address); - if (l2_entry->IsBlock()) { - *out = l2_entry->GetBlock() + GetL2Offset(address); - return true; - } else if (!l2_entry->IsTable()) { - return false; + /* Get the L1 entry, and check if it's a table. */ + const PageTableEntry *pte = this->GetL1Entry(address); + EntryLevel level = EntryLevel_L1; + if (pte->IsMappedTable()) { + /* Get the L2 entry, and check if it's a table. */ + pte = this->GetL2EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address); + level = EntryLevel_L2; + if (pte->IsMappedTable()) { + pte = this->GetL3EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address); + level = EntryLevel_L3; + } } - /* Try to get from l3 table. */ - const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, address); - if (l3_entry->IsBlock()) { - *out = l3_entry->GetBlock() + GetL3Offset(address); - return true; + const bool is_block = level == EntryLevel_L3 ? 
pte->IsPage() : pte->IsBlock(); + if (is_block) { + *out = this->GetBlock(pte, level) + this->GetOffset(address, level); + } else { + *out = Null; } - return false; + return is_block; } void KPageTableImpl::Dump(uintptr_t start, size_t size) const { From 104be247dadb5887545758e9532a7a2178179674 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 12:58:15 -0700 Subject: [PATCH 25/35] kern: continue page table refactor, implement separate/unmap --- .../arch/arm64/kern_k_page_table.hpp | 7 +- .../arch/arm64/kern_k_page_table_entry.hpp | 69 +++- .../arch/arm64/kern_k_page_table_impl.hpp | 9 +- libraries/libmesosphere/libmesosphere.mk | 2 +- .../source/arch/arm64/kern_k_page_table.cpp | 325 +++++++----------- .../arch/arm64/kern_k_page_table_impl.cpp | 138 +++----- .../libmesosphere/source/kern_k_thread.cpp | 3 + 7 files changed, 248 insertions(+), 305 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index 2ccd4b72ae..8446b6f1c7 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -233,8 +233,11 @@ namespace ams::kern::arch::arm64 { bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list); - ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); - Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); + void MergePages(TraversalContext *context, PageLinkedList *page_list); + void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list); + + Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); + Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll); Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp index 8941928edf..a87c7b5c5d 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp @@ -20,18 +20,22 @@ namespace ams::kern::arch::arm64 { + constexpr size_t BlocksPerContiguousBlock = 0x10; + constexpr size_t BlocksPerTable = PageSize / sizeof(u64); + constexpr size_t L1BlockSize = 1_GB; - constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize; + constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize; constexpr size_t L2BlockSize = 2_MB; - constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize; + constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize; constexpr size_t L3BlockSize = PageSize; - constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize; + constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize; class PageTableEntry { public: struct InvalidTag{}; struct TableTag{}; struct BlockTag{}; + struct SeparateContiguousTag{}; enum Permission : u64 
{ Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)), @@ -122,6 +126,25 @@ namespace ams::kern::arch::arm64 { { /* ... */ } + + /* Construct a table. */ + constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t num_blocks) + : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast(pxn) << 59) | GetInteger(phys_addr) | (num_blocks << 2) | 0x3) + { + /* ... */ + } + + /* Construct a block. */ + constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page) + : PageTableEntry(attr, (static_cast(sw_reserved_bits) << 55) | (static_cast(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid)) + { + /* ... */ + } + constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag) + : PageTableEntry(attr, GetInteger(phys_addr)) + { + /* ... */ + } protected: constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { return (m_attributes >> offset) & ((1ul << count) - 1); @@ -165,7 +188,7 @@ namespace ams::kern::arch::arm64 { constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast(this->SelectBits(8, 2)); } constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast(this->SelectBits(2, 3)); } constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast(this->GetBits(10, 1)); } - constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast(this->GetBits(8, 2)); } + constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast(this->GetBits(8, 2)); } constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast(this->GetBits(2, 3)); } constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; } constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; } @@ -194,6 +217,12 @@ namespace ams::kern::arch::arm64 { constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; } constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; } + constexpr ALWAYS_INLINE size_t GetTableNumEntries() const { return this->GetBits(2, 10); } + constexpr ALWAYS_INLINE decltype(auto) SetTableNumEntries(size_t num) { this->SetBits(2, 10, num); } + + constexpr ALWAYS_INLINE decltype(auto) AddTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() + num); } + constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); } + constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const { constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail)); return m_attributes & BaseMask; @@ -204,6 +233,38 @@ namespace ams::kern::arch::arm64 { return (m_attributes & BaseMaskForMerge) == attr; } + static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) { + constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | 
ExtensionFlag_DisableMergeTail)); + if (idx == 0) { + return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody; + } else if (idx < BlocksPerContiguousBlock - 1) { + return BaseMask; + } else { + return BaseMask | ExtensionFlag_DisableMergeTail; + } + } + + constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const { + return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx); + } + + static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) { + constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail)); + if (idx == 0) { + return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody; + } else if (idx < BlocksPerContiguousBlock) { + return BaseMask | ExtensionFlag_DisableMergeHeadAndBody; + } else if (idx < BlocksPerTable - 1) { + return BaseMask; + } else { + return BaseMask | ExtensionFlag_DisableMergeTail; + } + } + + constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const { + return m_attributes & GetEntryTemplateForSeparateMask(idx); + } + constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const { return m_attributes; } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp index 8616ac0ddb..68b324f6e8 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp @@ -45,7 +45,7 @@ namespace ams::kern::arch::arm64 { }; struct TraversalContext { - const PageTableEntry *level_entries[EntryLevel_Count]; + PageTableEntry *level_entries[EntryLevel_Count]; EntryLevel level; bool is_contiguous; }; @@ -125,6 +125,10 @@ namespace ams::kern::arch::arm64 { ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const { return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address); } + + static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) { + return 1 << (PageBits + LevelBits * level + 4 * contiguous); + } public: constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... 
*/ } @@ -141,6 +145,9 @@ namespace ams::kern::arch::arm64 { bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const; bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const; + + static bool MergePages(KVirtualAddress *out, TraversalContext *context); + void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const; }; } diff --git a/libraries/libmesosphere/libmesosphere.mk b/libraries/libmesosphere/libmesosphere.mk index 32db706647..79c2c13edd 100644 --- a/libraries/libmesosphere/libmesosphere.mk +++ b/libraries/libmesosphere/libmesosphere.mk @@ -20,7 +20,7 @@ endif DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE SETTINGS := $(ATMOSPHERE_SETTINGS) $(ATMOSPHERE_OPTIMIZATION_FLAG) -mgeneral-regs-only -ffixed-x18 -Wextra -Werror -fno-non-call-exceptions CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) -CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto +CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) SOURCES += $(foreach v,$(call ALL_SOURCE_DIRS,../libvapours/source),$(if $(findstring ../libvapours/source/sdmmc,$v),,$v)) diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index c1b0c76b4d..381f6c8802 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -280,18 +280,7 @@ namespace ams::kern::arch::arm64 { if (operation == OperationType_Unmap) { R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll)); } else if (operation == OperationType_Separate) { - const size_t size = num_pages * PageSize; - R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; - - if (num_pages > 1) { - const auto end_page = virt_addr + size; - const auto last_page = end_page - PageSize; - - R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); - } - - R_SUCCEED(); + R_RETURN(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll)); } else { auto entry_template = this->GetEntryTemplate(properties); @@ -519,16 +508,7 @@ namespace ams::kern::arch::arm64 { /* If we're not forcing an unmap, separate pages immediately. */ if (!force) { - const size_t size = num_pages * PageSize; - R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; - - if (num_pages > 1) { - const auto end_page = virt_addr + size; - const auto last_page = end_page - PageSize; - - R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); - } + R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll)); } /* Cache initial addresses for use on cleanup. */ @@ -558,10 +538,7 @@ namespace ams::kern::arch::arm64 { /* Handle the case where the block is bigger than it should be. 
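   This can only happen on a forced unmap: for example, force-unmapping a
   single 4KB page that currently lives inside a 2MB block. The oversized
   block must first be split down so that exactly the remaining pages can
   be cleared; the non-forced path has already performed this separation
   before entering the loop.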
*/ if (next_entry.block_size > remaining_pages * PageSize) { MESOSPHERE_ABORT_UNLESS(force); - MESOSPHERE_R_ABORT_UNLESS(this->SeparatePages(virt_addr, remaining_pages * PageSize, page_list, reuse_ll)); - const bool new_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); - MESOSPHERE_ASSERT(new_valid); - MESOSPHERE_UNUSED(new_valid); + MESOSPHERE_R_ABORT_UNLESS(this->SeparatePagesImpl(std::addressof(next_entry), std::addressof(context), virt_addr, remaining_pages * PageSize, page_list, reuse_ll)); } /* Check that our state is coherent. */ @@ -569,87 +546,38 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size)); /* Unmap the block. */ - L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); - switch (next_entry.block_size) { - case L1BlockSize: - { - /* Clear the entry. */ - *l1_entry = InvalidL1PageTableEntry; - } - break; - case L2ContiguousBlockSize: - case L2BlockSize: - { - /* Get the number of L2 blocks. */ - const size_t num_l2_blocks = next_entry.block_size / L2BlockSize; - - /* Get the L2 entry. */ - KPhysicalAddress l2_phys = Null; - MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); - const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); - - /* Clear the entry. */ - for (size_t i = 0; i < num_l2_blocks; i++) { - *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry; - } - PteDataMemoryBarrier(); + bool freeing_table = false; + while (true) { + /* Clear the entries. */ + const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1; + auto *pte = reinterpret_cast(context.is_contiguous ? util::AlignDown(reinterpret_cast(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast(context.level_entries[context.level])); + for (size_t i = 0; i < num_to_clear; ++i) { + pte[i] = InvalidPageTableEntry; + } - /* Close references to the L2 table. */ - if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { - if (this->GetPageTableManager().Close(l2_virt, num_l2_blocks)) { - *l1_entry = InvalidL1PageTableEntry; - this->NoteUpdated(); - this->FreePageTable(page_list, l2_virt); - pages_to_close.CloseAndReset(); - } - } - } - break; - case L3ContiguousBlockSize: - case L3BlockSize: - { - /* Get the number of L3 blocks. */ - const size_t num_l3_blocks = next_entry.block_size / L3BlockSize; - - /* Get the L2 entry. */ - KPhysicalAddress l2_phys = Null; - MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); - const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); - L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); - - /* Get the L3 entry. */ - KPhysicalAddress l3_phys = Null; - MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys)); - const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys); - - /* Clear the entry. */ - for (size_t i = 0; i < num_l3_blocks; i++) { - *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry; - } - PteDataMemoryBarrier(); + /* Remove the entries from the previous table. */ + if (context.level != KPageTableImpl::EntryLevel_L1) { + context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear); + } - /* Close references to the L3 table. 
*/ - if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) { - if (this->GetPageTableManager().Close(l3_virt, num_l3_blocks)) { - *l2_entry = InvalidL2PageTableEntry; - this->NoteUpdated(); + /* If we cleared a table, we need to note that we updated and free the table. */ + if (freeing_table) { + this->NoteUpdated(); + this->FreePageTable(page_list, KVirtualAddress(util::AlignDown(reinterpret_cast(context.level_entries[context.level - 1]), PageSize))); + } - /* Close reference to the L2 table. */ - if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { - if (this->GetPageTableManager().Close(l2_virt, 1)) { - *l1_entry = InvalidL1PageTableEntry; - this->NoteUpdated(); - this->FreePageTable(page_list, l2_virt); - } - } - - this->FreePageTable(page_list, l3_virt); - pages_to_close.CloseAndReset(); - } - } - } + /* Advance; we're no longer contiguous. */ + context.is_contiguous = false; + context.level_entries[context.level] = pte + num_to_clear - 1; + + /* We may have removed the last entries in a table, in which case we can free an unmap the tables. */ + if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) { break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + /* Advance; we will not be working with blocks any more. */ + context.level = static_cast(util::ToUnderlying(context.level) + 1); + freeing_table = true; } /* Close the blocks. */ @@ -663,8 +591,19 @@ namespace ams::kern::arch::arm64 { } /* Advance. */ - virt_addr += next_entry.block_size; - remaining_pages -= next_entry.block_size / PageSize; + size_t freed_size = next_entry.block_size; + if (freeing_table) { + /* We advanced more than by the block, so we need to calculate the actual advanced size. */ + const KProcessAddress new_virt_addr = util::AlignUp(GetInteger(virt_addr), impl.GetBlockSize(context.level, context.is_contiguous)); + MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size); + + freed_size = std::min(new_virt_addr - virt_addr, remaining_pages * PageSize); + } + + /* We can just advance by the block size. */ + virt_addr += freed_size; + remaining_pages -= freed_size / PageSize; + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); } @@ -1032,142 +971,117 @@ namespace ams::kern::arch::arm64 { return merged; } - Result KPageTable::SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) { + void KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); auto &impl = this->GetImpl(); - /* First, try to separate an L1 block into contiguous L2 blocks. */ - L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); - if (l1_entry->IsBlock()) { - /* If our block size is too big, don't bother. */ - R_SUCCEED_IF(block_size >= L1BlockSize); - - /* Get the addresses we're working with. */ - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize); - const KPhysicalAddress block_phys_addr = l1_entry->GetBlock(); - - /* Allocate a new page for the L2 table. */ - const KVirtualAddress l2_table = this->AllocatePageTable(page_list, reuse_ll); - R_UNLESS(l2_table != Null, svc::ResultOutOfResource()); - const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress(l2_table); - - /* Set the entries in the L2 table. 
*/ - for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) { - const u64 entry_template = l1_entry->GetEntryTemplateForL2Block(i); - *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true); + /* Iteratively merge, until we can't. */ + while (true) { + /* Try to merge. */ + KVirtualAddress freed_table = Null; + if (!impl.MergePages(std::addressof(freed_table), context)) { + break; } - /* Open references to the L2 table. */ - this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize); - - /* Replace the L1 entry with one to the new table. */ - PteDataMemoryBarrier(); - *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true); + /* Note that we updated. */ this->NoteUpdated(); - } - - /* If we don't have an l1 table, we're done. */ - MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable() || l1_entry->IsEmpty()); - R_SUCCEED_IF(!l1_entry->IsTable()); - - /* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */ - R_SUCCEED_IF(block_size >= L2ContiguousBlockSize); - L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr); - if (l2_entry->IsBlock()) { - /* If we're contiguous, try to separate. */ - if (l2_entry->IsContiguous()) { - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize); - const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize); - - /* Mark the entries as non-contiguous. */ - for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - L2PageTableEntry *target = impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i); - const u64 entry_template = target->GetEntryTemplateForL2Block(i); - *target = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false); - } - this->NoteUpdated(); - } + /* Free the page. */ + ClearPageTable(freed_table); + this->FreePageTable(page_list, freed_table); + } + } - /* We want to separate L2 blocks into L3 contiguous blocks, so check that our size permits that. */ - R_SUCCEED_IF(block_size >= L2BlockSize); + void KPageTable::MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - /* Get the addresses we're working with. */ - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize); - const KPhysicalAddress block_phys_addr = l2_entry->GetBlock(); + auto &impl = this->GetImpl(); - /* Allocate a new page for the L3 table. */ - const KVirtualAddress l3_table = this->AllocatePageTable(page_list, reuse_ll); - R_UNLESS(l3_table != Null, svc::ResultOutOfResource()); - const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table); + /* Begin traversal. */ + TraversalContext context; + TraversalEntry entry; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr)); - /* Set the entries in the L3 table. 
 */
-            for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) {
-                const u64 entry_template = l2_entry->GetEntryTemplateForL3Block(i);
-                *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true);
-            }
+        /* Merge start of the range. */
+        this->MergePages(std::addressof(context), page_list);
 
-            /* Open references to the L3 table. */
-            this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);
+        /* If we have more than one page, do the same for the end of the range. */
+        if (num_pages > 1) {
+            /* Begin traversal for end of range. */
+            const size_t size = num_pages * PageSize;
+            const auto end_page  = virt_addr + size;
+            const auto last_page = end_page - PageSize;
+            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), last_page));
 
-            /* Replace the L2 entry with one to the new table. */
-            PteDataMemoryBarrier();
-            *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
-            this->NoteUpdated();
+            /* Merge. */
+            this->MergePages(std::addressof(context), page_list);
         }
+    }
 
-        /* If we don't have an L3 table, we're done. */
-        MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable() || l2_entry->IsEmpty());
-        R_SUCCEED_IF(!l2_entry->IsTable());
+    Result KPageTable::SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
 
-        /* We want to separate L3 contiguous blocks into L2 blocks, so check that our size permits that. */
-        R_SUCCEED_IF(block_size >= L3ContiguousBlockSize);
+        auto &impl = this->GetImpl();
 
-        /* If we're contiguous, try to separate. */
-        L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
-        if (l3_entry->IsBlock() && l3_entry->IsContiguous()) {
-            const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
-            const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);
+        /* If at any point we fail, we want to merge. */
+        ON_RESULT_FAILURE { this->MergePages(context, page_list); };
 
-            /* Mark the entries as non-contiguous. */
-            for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
-                L3PageTableEntry *target = impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i);
-                const u64 entry_template = target->GetEntryTemplateForL3Block(i);
-                *target = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false);
+        /* Iterate, separating until our block size is small enough. */
+        while (entry->block_size > block_size) {
+            /* If necessary, allocate a table. */
+            KVirtualAddress table = Null<KVirtualAddress>;
+            if (!context->is_contiguous) {
+                table = this->AllocatePageTable(page_list, reuse_ll);
+                R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());
             }
+
+            /* Separate. */
+            impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table));
+            this->NoteUpdated();
         }
 
         /* We're done!
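   Each iteration above steps the mapping down exactly one granularity
   (for example, 2MB block -> table of contiguous 4KB runs -> individual
   4KB entries), so splitting a 2MB block down to a lone page takes two
   iterations, and only the first (block -> table) needs to allocate a
   page table; breaking a contiguous run apart just rewrites its 16
   entries in place.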
*/ R_SUCCEED(); } - Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - /* If we fail while separating, re-merge. */ - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; - - /* Try to separate pages. */ - R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll)); - } + auto &impl = this->GetImpl(); - Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) { - MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + /* Begin traversal. */ + TraversalContext start_context; + TraversalEntry entry; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(start_context), virt_addr)); - /* Separate pages before we change permissions. */ + /* Separate pages at the start of the range. */ const size_t size = num_pages * PageSize; - R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); + R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(start_context), virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); + + /* If necessary, separate pages at the end of the range. */ if (num_pages > 1) { const auto end_page = virt_addr + size; const auto last_page = end_page - PageSize; - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; + /* Begin traversal. */ + TraversalContext end_context; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(end_context), last_page)); + + + ON_RESULT_FAILURE { this->MergePages(std::addressof(start_context), page_list); }; - R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); + R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(end_context), last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); } + R_SUCCEED(); + } + + Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + /* Separate pages before we change permissions. */ + R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll)); + /* ===================================================== */ /* Define a helper function which will apply our template to entries. */ @@ -1376,10 +1290,7 @@ namespace ams::kern::arch::arm64 { } /* We've succeeded, now perform what coalescing we can. 
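   The separation performed before the permission change only split
   entries at the head and tail of the range, so those are the only
   places where a merge can now succeed; if, say, all 512 entries of an
   L3 table now carry identical attributes, they collapse back into
   contiguous runs and then into a single 2MB block, freeing the table.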
*/ - this->MergePages(virt_addr, page_list); - if (num_pages > 1) { - this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list); - } + this->MergePages(virt_addr, num_pages, page_list); R_SUCCEED(); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index 5bc775f664..5f03bb83c3 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -33,94 +33,6 @@ namespace ams::kern::arch::arm64 { return m_table; } - // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const { - // /* Set the L3 entry. */ - // out_context->l3_entry = l3_entry; - // - // if (l3_entry->IsBlock()) { - // /* Set the output entry. */ - // out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1)); - // if (l3_entry->IsContiguous()) { - // out_entry->block_size = L3ContiguousBlockSize; - // } else { - // out_entry->block_size = L3BlockSize; - // } - // out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits(); - // out_entry->attr = 0; - // - // return true; - // } else { - // out_entry->phys_addr = Null; - // out_entry->block_size = L3BlockSize; - // out_entry->sw_reserved_bits = 0; - // out_entry->attr = 0; - // return false; - // } - // } - // - // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const { - // /* Set the L2 entry. */ - // out_context->l2_entry = l2_entry; - // - // if (l2_entry->IsBlock()) { - // /* Set the output entry. */ - // out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1)); - // if (l2_entry->IsContiguous()) { - // out_entry->block_size = L2ContiguousBlockSize; - // } else { - // out_entry->block_size = L2BlockSize; - // } - // out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits(); - // out_entry->attr = 0; - // - // /* Set the output context. */ - // out_context->l3_entry = nullptr; - // return true; - // } else if (l2_entry->IsTable()) { - // return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr); - // } else { - // out_entry->phys_addr = Null; - // out_entry->block_size = L2BlockSize; - // out_entry->sw_reserved_bits = 0; - // out_entry->attr = 0; - // - // out_context->l3_entry = nullptr; - // return false; - // } - // } - // - // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const { - // /* Set the L1 entry. */ - // out_context->level_entries[EntryLevel_L1] = l1_entry; - // - // if (l1_entry->IsBlock()) { - // /* Set the output entry. */ - // out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1)); - // if (l1_entry->IsContiguous()) { - // out_entry->block_size = L1ContiguousBlockSize; - // } else { - // out_entry->block_size = L1BlockSize; - // } - // out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits(); - // - // /* Set the output context. 
*/ - // out_context->l2_entry = nullptr; - // out_context->l3_entry = nullptr; - // return true; - // } else if (l1_entry->IsTable()) { - // return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr); - // } else { - // out_entry->phys_addr = Null; - // out_entry->block_size = L1BlockSize; - // out_entry->sw_reserved_bits = 0; - // out_entry->attr = 0; - // - // out_context->l2_entry = nullptr; - // out_context->l3_entry = nullptr; - // return false; - // } - // } - bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const { /* Setup invalid defaults. */ *out_entry = {}; @@ -176,9 +88,8 @@ namespace ams::kern::arch::arm64 { bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const { /* Advance entry. */ - auto *cur_pte = context->level_entries[context->level]; - auto *next_pte = reinterpret_cast(context->is_contiguous ? util::AlignDown(reinterpret_cast(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast(cur_pte) + sizeof(PageTableEntry)); + auto *next_pte = reinterpret_cast(context->is_contiguous ? util::AlignDown(reinterpret_cast(cur_pte), BlocksPerContiguousBlock * sizeof(PageTableEntry)) + BlocksPerContiguousBlock * sizeof(PageTableEntry) : reinterpret_cast(cur_pte) + sizeof(PageTableEntry)); /* Set the pte. */ context->level_entries[context->level] = next_pte; @@ -255,6 +166,53 @@ namespace ams::kern::arch::arm64 { return is_block; } + bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) { + /* TODO */ + MESOSPHERE_UNUSED(out, context); + MESOSPHERE_PANIC("page tables"); + } + + void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const { + /* We want to downgrade the pages by one step. */ + if (context->is_contiguous) { + /* We want to downgrade a contiguous mapping to a non-contiguous mapping. */ + pte = reinterpret_cast(util::AlignDown(reinterpret_cast(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry))); + + auto * const first = pte; + const KPhysicalAddress block = this->GetBlock(first, context->level); + for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) { + pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{}); + } + + context->is_contiguous = false; + + context->level_entries[context->level] = pte + (this->GetLevelIndex(address, context->level) & (BlocksPerContiguousBlock - 1)); + } else { + /* We want to downgrade a block into a table. */ + auto * const first = context->level_entries[context->level]; + const KPhysicalAddress block = this->GetBlock(first, context->level); + for (size_t i = 0; i < BlocksPerTable; ++i) { + pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level == EntryLevel_L3); + } + + context->is_contiguous = true; + context->level = static_cast(util::ToUnderlying(context->level) - 1); + + /* Wait for pending stores to complete. */ + cpu::DataSynchronizationBarrierInnerShareableStore(); + + /* Update the block entry to be a table entry. 
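   The store barrier above is what makes this publish step safe: the new
   table's entries must be visible to the translation table walker before
   the descriptor that points at them, so a concurrent walk can never
   follow the new table descriptor into half-initialized entries. Fill,
   barrier, then publish is the standard ordering for an in-place split
   like this.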
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
index 4ec7aa1dd9..74b39acd3d 100644
--- a/libraries/libmesosphere/source/kern_k_thread.cpp
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -1438,7 +1438,10 @@ namespace ams::kern {
         this->SetState(ThreadState_Waiting);
 
         /* Set our wait queue. */
+        #pragma GCC diagnostic push
+        #pragma GCC diagnostic ignored "-Wdangling-pointer"
         m_wait_queue = queue;
+        #pragma GCC diagnostic pop
     }
 
     void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {
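The kern_k_thread.cpp hunk above is a compiler workaround rather than a behavior change: m_wait_queue receives the address of a queue object that typically lives on the waiting thread's stack, and GCC's -Wdangling-pointer cannot see that the wait is guaranteed to finish (and the pointer to be dropped) before that frame unwinds. The flagged pattern looks roughly like this (sketch; the function name is hypothetical and the queue usage is simplified):

    void WaitForSignal(KThread *cur_thread) {
        KThreadQueue wait_queue;                           /* Stack-allocated wait queue. */
        cur_thread->BeginWait(std::addressof(wait_queue)); /* Stores the stack address in the thread. */
        /* The thread is descheduled here and is always woken (clearing the stored */
        /* pointer) before this function returns, so no dangling use actually occurs. */
    }

Scoping the pragma with push/pop keeps the warning active for the rest of the file.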
From e2f068548b4d7cfbc4f69667528f817e835bd1e7 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 13:25:37 -0700
Subject: [PATCH 26/35] kern: implement KPageTableImpl merge

---
 .../arch/arm64/kern_k_page_table_entry.hpp    |  2 +-
 .../arch/arm64/kern_k_page_table_impl.cpp     | 75 ++++++++++++++++++-
 2 files changed, 73 insertions(+), 4 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
index a87c7b5c5d..d3ac790894 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
@@ -224,7 +224,7 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); }
 
             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
-                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
                 return m_attributes & BaseMask;
             }
 
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 5f03bb83c3..249024a290 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -167,9 +167,78 @@ namespace ams::kern::arch::arm64 {
     }
 
     bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) {
-        /* TODO */
-        MESOSPHERE_UNUSED(out, context);
-        MESOSPHERE_PANIC("page tables");
+        /* We want to upgrade the pages by one step. */
+        if (context->is_contiguous) {
+            /* We can't merge an L1 table. */
+            if (context->level == EntryLevel_L1) {
+                return false;
+            }
+
+            /* We want to upgrade a contiguous mapping in a table to a block. */
+            PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
+            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+
+            /* First, check that all entries are valid for us to merge. */
+            const u64 entry_template = pte->GetEntryTemplateForMerge();
+            for (size_t i = 0; i < BlocksPerTable; ++i) {
+                if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | PageTableEntry::ContigType_Contiguous | pte->GetTestTableMask())) {
+                    return false;
+                }
+                if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
+                    return false;
+                }
+                if (i < BlocksPerTable - 1 && pte[i].IsTailMergeDisabled()) {
+                    return false;
+                }
+            }
+
+            /* The entries are valid for us to merge, so merge them. */
+            const auto *head_pte = pte;
+            const auto *tail_pte = pte + BlocksPerTable - 1;
+            const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
+
+            *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false);
+
+            /* Update our context. */
+            context->is_contiguous = false;
+            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
+
+            /* Set the output to the table we just freed. */
+            *out = KVirtualAddress(pte);
+        } else {
+            /* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
+            PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
+            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+
+            /* First, check that all entries are valid for us to merge. */
+            const u64 entry_template = pte->GetEntryTemplateForMerge();
+            for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
+                if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | pte->GetTestTableMask())) {
+                    return false;
+                }
+                if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
+                    return false;
+                }
+                if (i < BlocksPerContiguousBlock - 1 && pte[i].IsTailMergeDisabled()) {
+                    return false;
+                }
+            }
+
+            /* The entries are valid for us to merge, so merge them. */
+            const auto *head_pte = pte;
+            const auto *tail_pte = pte + BlocksPerContiguousBlock - 1;
+            const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
+
+            for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3);
+            }
+
+            /* Update our context. */
+            context->level_entries[context->level] = pte;
+            context->is_contiguous = true;
+        }
+
+        return true;
     }
 
     void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const {
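For orientation, the granule sizes MergePages moves between are fixed by the AArch64 4KiB translation granule; the constants below restate the assumed values behind the BlocksPerContiguousBlock/BlocksPerTable identifiers used above (they are not new definitions):

    constexpr size_t BlocksPerContiguousBlock = 0x10; /* 16 adjacent entries share the contiguous hint. */
    constexpr size_t BlocksPerTable           = 512;  /* 512 entries per table at each level.           */
    /* So one merge step goes 4KiB -> 64KiB -> 2MiB around L3/L2, and 2MiB -> 32MiB -> 1GiB around L2/L1. */

A merge is refused unless every candidate entry matches the first entry's template (identical attributes, exactly the expected physical offset) and no entry carries a software disable-merge bit in a position the merge would destroy.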
From 170688ef306d99d1036505769ddd468dce85b5ff Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 13:39:08 -0700
Subject: [PATCH 27/35] kern: use new merge pages api

---
 .../arch/arm64/kern_k_page_table.hpp          |   4 +-
 .../source/arch/arm64/kern_k_page_table.cpp   | 214 ++----------------
 2 files changed, 15 insertions(+), 203 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
index 8446b6f1c7..e4fca0dd58 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
@@ -231,9 +231,7 @@ namespace ams::kern::arch::arm64 {
             Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
             Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
 
-            bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
-
-            void MergePages(TraversalContext *context, PageLinkedList *page_list);
+            bool MergePages(TraversalContext *context, PageLinkedList *page_list);
             void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list);
 
             Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 381f6c8802..e62114d608 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -678,13 +678,7 @@ namespace ams::kern::arch::arm64 {
         }
 
         /* Perform what coalescing we can. */
-        this->MergePages(orig_virt_addr, page_list);
-        if (num_pages > 1) {
-            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
-        }
-
-        /* Wait for pending stores to complete. */
-        cpu::DataSynchronizationBarrierInnerShareableStore();
+        this->MergePages(orig_virt_addr, num_pages, page_list);
 
         /* Open references to the pages, if we should. */
         if (IsHeapPhysicalAddress(orig_phys_addr)) {
@@ -776,207 +770,20 @@ namespace ams::kern::arch::arm64 {
         MESOSPHERE_ASSERT(mapped_pages == num_pages);
 
         /* Perform what coalescing we can. */
-        this->MergePages(orig_virt_addr, page_list);
-        if (num_pages > 1) {
-            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
-        }
-
-        /* Wait for pending stores to complete. */
-        cpu::DataSynchronizationBarrierInnerShareableStore();
+        this->MergePages(orig_virt_addr, num_pages, page_list);
 
         /* We succeeded! We want to persist the reference to the pages. */
         spg.CancelClose();
 
         R_SUCCEED();
     }
 
-    bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
-        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-        auto &impl = this->GetImpl();
-        bool merged = false;
-
-        /* If there's no L1 table, don't bother. */
-        L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
-        if (!l1_entry->IsTable()) {
-            /* Ensure the table is not corrupted. */
-            MESOSPHERE_ABORT_UNLESS(l1_entry->IsBlock() || l1_entry->IsEmpty());
-            return merged;
-        }
-
-        /* Examine and try to merge the L2 table. */
-        L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
-        if (l2_entry->IsTable()) {
-            /* We have an L3 entry. */
-            L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
-            if (!l3_entry->IsBlock()) {
-                return merged;
-            }
-
-            /* If it's not contiguous, try to make it so. */
-            if (!l3_entry->IsContiguous()) {
-                virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
-                const KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);
-                const u64 entry_template = l3_entry->GetEntryTemplateForMerge();
-
-                /* Validate that we can merge. */
-                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
-                    const L3PageTableEntry *check_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i);
-                    if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L3BlockSize * i) | PageTableEntry::Type_L3Block)) {
-                        return merged;
-                    }
-                    if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
-                        return merged;
-                    }
-                    if ((i < (L3ContiguousBlockSize / L3BlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
-                        return merged;
-                    }
-                }
-
-                /* Determine the new software reserved bits. */
-                const L3PageTableEntry *head_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * 0);
-                const L3PageTableEntry *tail_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * ((L3ContiguousBlockSize / L3BlockSize) - 1));
-                auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());
-
-                /* Merge! */
-                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
-                    *impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + L3BlockSize * i, PageTableEntry(entry_template), sw_reserved_bits, true);
-                    sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-                }
-
-                /* Note that we updated. */
-                this->NoteUpdated();
-                merged = true;
-            }
-
-            /* We might be able to upgrade a contiguous set of L3 entries into an L2 block. */
-            virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
-            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L2BlockSize);
-            const u64 entry_template = l3_entry->GetEntryTemplateForMerge();
-
-            /* Validate that we can merge. */
-            for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) {
-                const L3PageTableEntry *check_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i);
-                if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L3Block)) {
-                    return merged;
-                }
-                if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
-                    return merged;
-                }
-                if ((i < (L2BlockSize / L3ContiguousBlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
-                    return merged;
-                }
-            }
-
-            /* Determine the new software reserved bits. */
-            const L3PageTableEntry *head_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * 0);
-            const L3PageTableEntry *tail_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * ((L2BlockSize / L3ContiguousBlockSize) - 1));
-            auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());
-
-            /* Merge! */
-            *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
-
-            /* Note that we updated. */
-            this->NoteUpdated();
-            merged = true;
-
-            /* Free the L3 table. */
-            KVirtualAddress l3_table = util::AlignDown(reinterpret_cast<uintptr_t>(l3_entry), PageSize);
-            if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
-                this->GetPageTableManager().Close(l3_table, L2BlockSize / L3BlockSize);
-                ClearPageTable(l3_table);
-                this->FreePageTable(page_list, l3_table);
-            }
-        }
-
-        /* If the l2 entry is not a block or we can't make it contiguous, we're done. */
-        if (!l2_entry->IsBlock()) {
-            return merged;
-        }
-
-        /* If it's not contiguous, try to make it so. */
-        if (!l2_entry->IsContiguous()) {
-            virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
-            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
-            const u64 entry_template = l2_entry->GetEntryTemplateForMerge();
-
-            /* Validate that we can merge. */
-            for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
-                const L2PageTableEntry *check_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i);
-                if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) {
-                    return merged;
-                }
-                if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
-                    return merged;
-                }
-                if ((i < (L2ContiguousBlockSize / L2BlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
-                    return merged;
-                }
-            }
-
-            /* Determine the new software reserved bits. */
-            const L2PageTableEntry *head_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * 0);
-            const L2PageTableEntry *tail_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * ((L2ContiguousBlockSize / L2BlockSize) - 1));
-            auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());
-
-            /* Merge! */
-            for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
-                *impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + L2BlockSize * i, PageTableEntry(entry_template), sw_reserved_bits, true);
-                sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-            }
-
-            /* Note that we updated. */
-            this->NoteUpdated();
-            merged = true;
-        }
-
-        /* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
-        virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
-        KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
-        const u64 entry_template = l2_entry->GetEntryTemplateForMerge();
-
-        /* Validate that we can merge. */
-        for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
-            const L2PageTableEntry *check_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i);
-            if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L2Block)) {
-                return merged;
-            }
-            if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
-                return merged;
-            }
-            if ((i < (L1ContiguousBlockSize / L2ContiguousBlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
-                return merged;
-            }
-        }
-
-        /* Determine the new software reserved bits. */
-        const L2PageTableEntry *head_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * 0);
-        const L2PageTableEntry *tail_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * ((L1BlockSize / L2ContiguousBlockSize) - 1));
-        auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());
-
-        /* Merge! */
-        *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
-
-        /* Note that we updated. */
-        this->NoteUpdated();
-        merged = true;
-
-        /* Free the L2 table. */
-        KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
-        if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
-            this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
-            ClearPageTable(l2_table);
-            this->FreePageTable(page_list, l2_table);
-        }
-
-        return merged;
-    }
-
-    void KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) {
+    bool KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
 
         auto &impl = this->GetImpl();
 
         /* Iteratively merge, until we can't. */
+        bool merged = false;
         while (true) {
             /* Try to merge. */
             KVirtualAddress freed_table = Null<KVirtualAddress>;
@@ -988,9 +795,16 @@ namespace ams::kern::arch::arm64 {
             this->NoteUpdated();
 
             /* Free the page. */
-            ClearPageTable(freed_table);
-            this->FreePageTable(page_list, freed_table);
+            if (freed_table != Null<KVirtualAddress>) {
+                ClearPageTable(freed_table);
+                this->FreePageTable(page_list, freed_table);
+            }
+
+            /* We performed at least one merge. */
+            merged = true;
         }
+
+        return merged;
     }
 
     void KPageTable::MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list) {
@@ -1231,7 +1045,7 @@ namespace ams::kern::arch::arm64 {
                 if (util::IsAligned(GetInteger(cur_virt_addr) + next_entry.block_size, larger_align)) {
                     const uintptr_t aligned_start = util::AlignDown(GetInteger(cur_virt_addr), larger_align);
                     if (orig_virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(orig_virt_addr) + (num_pages * PageSize) - 1) {
-                        merge = this->MergePages(cur_virt_addr, page_list);
+                        merge = this->MergePages(std::addressof(context), page_list);
                     } else {
                         merge = false;
                     }
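With this patch the coalescing API is context-driven: rather than re-walking from the L1 root for each level (the roughly 190-line MergePages deleted above), the caller hands the impl the TraversalContext it already holds, and KPageTable::MergePages loops one upgrade step at a time until KPageTableImpl::MergePages reports that no further merge is possible. A sketch of the intended call pattern (assumed usage, mirroring the call site changed in the last hunk):

    TraversalEntry entry;
    TraversalContext context;
    MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr));
    if (this->MergePages(std::addressof(context), page_list)) {
        /* The context now refers to the merged, larger entry; any cached */
        /* entry/size state derived from the old traversal must be refreshed. */
    }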
From 2749ac416e7379e55fd92eb4bc491564c07b8d90 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 14:00:15 -0700
Subject: [PATCH 28/35] kern: update KPageTable::Finalize for the refactor

---
 .../source/arch/arm64/kern_k_page_table.cpp   | 127 +++++++++---------
 1 file changed, 67 insertions(+), 60 deletions(-)

diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index e62114d608..d34c98b69f 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -174,79 +174,85 @@ namespace ams::kern::arch::arm64 {
 
         /* Traverse, freeing all pages. */
         {
-            /* Get the address space size. */
-            const size_t as_size = this->GetAddressSpaceSize();
-
             /* Begin the traversal. */
             TraversalContext context;
-            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
-            bool cur_valid = false;
-            TraversalEntry next_entry;
-            bool next_valid;
-            size_t tot_size = 0;
+            TraversalEntry entry;
 
-            next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), this->GetAddressSpaceStart());
+            KPhysicalAddress cur_phys_addr = Null<KPhysicalAddress>;
+            size_t cur_size = 0;
+            u8 has_attr = 0;
 
-            /* Iterate over entries. */
+            bool cur_valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), this->GetAddressSpaceStart());
             while (true) {
-                /* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
-                /* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
-                if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size && next_entry.attr == (cur_entry.attr ? 1 : 0))) {
-                    cur_entry.block_size += next_entry.block_size;
-                } else {
-                    if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
-                        mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
+                if (cur_valid) {
+                    /* Free the actual pages, if there are any. */
+                    if (IsHeapPhysicalAddressForFinalize(entry.phys_addr)) {
+                        if (cur_size > 0) {
+                            /* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
+                            /* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
+                            if (entry.phys_addr == cur_phys_addr + cur_size && entry.attr == has_attr) {
+                                /* Just extend the block, since we can. */
+                                cur_size += entry.block_size;
+                            } else {
+                                /* Close the block, and begin tracking anew. */
+                                mm.Close(cur_phys_addr, cur_size / PageSize);
+
+                                cur_phys_addr = entry.phys_addr;
+                                cur_size = entry.block_size;
+                                has_attr = entry.attr != 0;
+                            }
+                        } else {
+                            cur_phys_addr = entry.phys_addr;
+                            cur_size = entry.block_size;
+                            has_attr = entry.attr != 0;
+                        }
                     }
 
-                    /* Update tracking variables. */
-                    tot_size += cur_entry.block_size;
-                    cur_entry = next_entry;
-                    cur_valid = next_valid;
-                }
+                    /* Clean up the page table entries. */
+                    bool freeing_table = false;
+                    while (true) {
+                        /* Clear the entries. */
+                        const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
+                        auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
+                        for (size_t i = 0; i < num_to_clear; ++i) {
+                            pte[i] = InvalidPageTableEntry;
+                        }
+
+                        /* Remove the entries from the previous table. */
+                        if (context.level != KPageTableImpl::EntryLevel_L1) {
+                            context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
+                        }
+
+                        /* If we cleared a table, we need to note that we updated and free the table. */
+                        if (freeing_table) {
+                            KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                            ClearPageTable(table);
+                            this->GetPageTableManager().Free(table);
+                        }
+
+                        /* Advance; we're no longer contiguous. */
+                        context.is_contiguous = false;
+                        context.level_entries[context.level] = pte + num_to_clear - 1;
+
+                        /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
+                        if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+                            break;
+                        }
+
+                        /* Advance; we will not be working with blocks any more. */
+                        context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
+                        freeing_table = true;
+                    }
+                }
 
-                if (cur_entry.block_size + tot_size >= as_size) {
-                    break;
-                }
-
-                next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+                /* Continue the traversal. */
+                cur_valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
             }
 
-            /* Handle the last block. */
-            if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
-                mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
-            }
-        }
-
-        /* Cache address space extents for convenience. */
-        const KProcessAddress as_start = this->GetAddressSpaceStart();
-        const KProcessAddress as_last = as_start + this->GetAddressSpaceSize() - 1;
-
-        /* Free all L3 tables. */
-        for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L2BlockSize) {
-            L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
-            if (l1_entry->IsTable()) {
-                L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, cur_address);
-                if (l2_entry->IsTable()) {
-                    const KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
-                    if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
-                        while (!this->GetPageTableManager().Close(l3_table, 1)) { /* ... */ }
-                        ClearPageTable(l3_table);
-                        this->GetPageTableManager().Free(l3_table);
-                    }
-                }
-            }
-        }
-
-        /* Free all L2 tables. */
-        for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L1BlockSize) {
-            L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
-            if (l1_entry->IsTable()) {
-                const KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
-                if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
-                    while (!this->GetPageTableManager().Close(l2_table, 1)) { /* ... */ }
-                    ClearPageTable(l2_table);
-                    this->GetPageTableManager().Free(l2_table);
-                }
+            /* Free any remaining pages. */
+            if (cur_size > 0) {
+                mm.Close(cur_phys_addr, cur_size / PageSize);
             }
         }
 
@@ -260,6 +266,7 @@ namespace ams::kern::arch::arm64 {
             KPageTableBase::Finalize();
         }
+
         R_SUCCEED();
     }
 
@@ -570,7 +577,7 @@ namespace ams::kern::arch::arm64 {
                     context.is_contiguous = false;
                     context.level_entries[context.level] = pte + num_to_clear - 1;
 
-                    /* We may have removed the last entries in a table, in which case we can free an unmap the tables. */
+                    /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
                     if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
                         break;
                     }
@@ -1056,7 +1063,7 @@ namespace ams::kern::arch::arm64 {
 
             /* If we merged, correct the traversal to a sane state. */
             if (merge) {
-                /* NOTE: Nintendo does not verify the result of this BeginTraversal call. */
+                /* NOTE: Begin a new traversal, now that we've merged. */
                 MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));
 
                 /* The actual size needs to not take into account the portion of the block before our virtual address. */
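Finalize now tears everything down in a single traversal: pages are closed in maximal physically-contiguous runs, and page tables are freed inline the moment their entry counts drop to zero, which is what makes the old three-pass sweep (pages, then L3 tables, then L2 tables) removable. The accumulate-then-close idiom, in isolation (illustrative restatement of the code above; mm is the memory manager as in the surrounding code):

    /* For each traversed block: extend the run if physically adjacent with matching attr, */
    /* otherwise close what we have and start tracking a new run. */
    if (entry.phys_addr == cur_phys_addr + cur_size && entry.attr == has_attr) {
        cur_size += entry.block_size;
    } else {
        if (cur_size > 0) { mm.Close(cur_phys_addr, cur_size / PageSize); }
        cur_phys_addr = entry.phys_addr;
        cur_size      = entry.block_size;
    }
    /* After the loop, one final mm.Close() flushes whatever run remains. */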
From c9df4f8e260c30c04813fd5786e50022e1a6ba8c Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 14:20:31 -0700
Subject: [PATCH 29/35] kern: update ChangePermissions to use new iteration
 logic

---
 .../arch/arm64/kern_k_page_table.hpp          |  1 -
 .../source/arch/arm64/kern_k_page_table.cpp   | 73 +++++--------------
 2 files changed, 20 insertions(+), 54 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
index e4fca0dd58..0398d1a160 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
@@ -245,7 +245,6 @@ namespace ams::kern::arch::arm64 {
 
             static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
                 cpu::ClearPageToZero(GetVoidPointer(table));
-                cpu::DataSynchronizationBarrierInnerShareable();
             }
 
             ALWAYS_INLINE void OnTableUpdated() const {
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index d34c98b69f..63f4b46da6 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -293,6 +293,10 @@ namespace ams::kern::arch::arm64 {
 
         switch (operation) {
             case OperationType_Map:
+                /* If mapping io or uncached pages, ensure that there is no pending reschedule. */
+                if (properties.io || properties.uncached) {
+                    KScopedSchedulerLock sl;
+                }
                 R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
             case OperationType_ChangePermissions:
                 R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
@@ -317,6 +321,10 @@ namespace ams::kern::arch::arm64 {
         switch (operation) {
             case OperationType_MapGroup:
            case OperationType_MapFirstGroup:
+                /* If mapping io or uncached pages, ensure that there is no pending reschedule. */
+                if (properties.io || properties.uncached) {
+                    KScopedSchedulerLock sl;
+                }
                 R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
             MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
         }
@@ -900,6 +908,9 @@ namespace ams::kern::arch::arm64 {
     Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
 
+        /* Ensure there are no pending data writes. */
+        cpu::DataSynchronizationBarrier();
+
         /* Separate pages before we change permissions. */
         R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
 
@@ -990,59 +1001,15 @@ namespace ams::kern::arch::arm64 {
             }
 
             /* Apply the entry template. */
-            L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_virt_addr);
-            switch (next_entry.block_size) {
-                case L1BlockSize:
-                    {
-                        /* Write the updated entry. */
-                        *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr, entry_template, sw_reserved_bits, false);
-                    }
-                    break;
-                case L2ContiguousBlockSize:
-                case L2BlockSize:
-                    {
-                        /* Get the number of L2 blocks. */
-                        const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;
-
-                        /* Get the L2 entry. */
-                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
-                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
-                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
-
-                        /* Write the updated entry. */
-                        const bool contig = next_entry.block_size == L2ContiguousBlockSize;
-                        for (size_t i = 0; i < num_l2_blocks; i++) {
-                            *impl.GetL2EntryFromTable(l2_virt, cur_virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L2BlockSize * i, entry_template, sw_reserved_bits, contig);
-                            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-                        }
-                    }
-                    break;
-                case L3ContiguousBlockSize:
-                case L3BlockSize:
-                    {
-                        /* Get the number of L3 blocks. */
-                        const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;
-
-                        /* Get the L2 entry. */
-                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
-                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
-                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
-                        L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, cur_virt_addr);
-
-                        /* Get the L3 entry. */
-                        KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
-                        MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
-                        const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);
-                        /* Write the updated entry. */
-                        const bool contig = next_entry.block_size == L3ContiguousBlockSize;
-                        for (size_t i = 0; i < num_l3_blocks; i++) {
-                            *impl.GetL3EntryFromTable(l3_virt, cur_virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L3BlockSize * i, entry_template, sw_reserved_bits, contig);
-                            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-                        }
-                    }
-                    break;
-                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+            {
+                const size_t num_entries = context.is_contiguous ? BlocksPerContiguousBlock : 1;
+
+                auto * const pte = context.level_entries[context.level];
+                const size_t block_size = impl.GetBlockSize(context.level);
+                for (size_t i = 0; i < num_entries; ++i) {
+                    pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + i * block_size, entry_template, sw_reserved_bits, context.is_contiguous, context.level == KPageTableImpl::EntryLevel_L3);
+                    sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
+                }
             }
 
             /* If our option asks us to, try to merge mappings. */
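The rewritten ChangePermissions tail is level-generic: instead of switching on the concrete block size (the deleted L1/L2/L3 cases above), it derives everything from the traversal context. The relationship it relies on (values follow from the 4KiB granule; GetBlockSize is the impl helper used above):

    /* block_size = PageSize << (LevelBits * level), i.e. 4KiB / 2MiB / 1GiB for L3 / L2 / L1, */
    /* covering 16x that span when the contiguous bit applies. */
    const size_t block_size  = impl.GetBlockSize(context.level);
    const size_t num_entries = context.is_contiguous ? BlocksPerContiguousBlock : 1;

Clearing SoftwareReservedBit_DisableMergeHead after the first entry, as the loop does, keeps the head-merge restriction attached only to the first page of the range.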
From ac1a8e749e4601ffb626e3189f1afdd2f2665989 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 15:29:29 -0700
Subject: [PATCH 30/35] kern: update KPageTable::Map for new refactor

---
 .../arch/arm64/kern_k_page_table.hpp          |  25 +-
 .../arch/arm64/kern_k_page_table_impl.hpp     |  10 +-
 .../source/arch/arm64/kern_k_page_table.cpp   | 309 +++++++-----------
 3 files changed, 128 insertions(+), 216 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
index 0398d1a160..f889d199a0 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
@@ -201,32 +201,9 @@ namespace ams::kern::arch::arm64 {
             NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
             Result Finalize();
         private:
-            Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
-            Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
-            Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
-
             Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
 
-            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
-                switch (page_size) {
-                    case L1BlockSize:
-                        R_RETURN(this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
-                    case L2ContiguousBlockSize:
-                        entry_template.SetContiguous(true);
-                        [[fallthrough]];
-#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
-                    case L2TegraSmmuBlockSize:
-#endif
-                    case L2BlockSize:
-                        R_RETURN(this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
-                    case L3ContiguousBlockSize:
-                        entry_template.SetContiguous(true);
-                        [[fallthrough]];
-                    case L3BlockSize:
-                        R_RETURN(this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
-                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
-                }
-            }
+            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll);
 
             Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
             Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
index 68b324f6e8..57159c5273 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
@@ -78,8 +78,6 @@ namespace ams::kern::arch::arm64 {
             static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 3), LevelBits>(GetInteger(addr)); }
 
-            static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
-
             static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); }
@@ -93,10 +91,8 @@ namespace ams::kern::arch::arm64 {
             static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
                 return KMemoryLayout::GetLinearVirtualAddress(addr);
             }
-
-            //ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
-            //ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
-            //ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
+        public:
+            static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
         private:
             L1PageTableEntry *m_table;
             bool m_is_kernel;
@@ -134,6 +130,8 @@ namespace ams::kern::arch::arm64 {
             explicit KPageTableImpl() { /* ... */ }
+            size_t GetNumL1Entries() const { return m_num_entries; }
+
             NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
             NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
             L1PageTableEntry *Finalize();
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 63f4b46da6..cbd586289b 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -330,195 +330,12 @@ namespace ams::kern::arch::arm64 {
         }
     }
 
-    Result KPageTable::MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
-        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
-        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L1BlockSize));
-        MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L1BlockSize));
-
-        /* Allocation is never needed for L1 block mapping. */
-        MESOSPHERE_UNUSED(page_list, reuse_ll);
-
-        auto &impl = this->GetImpl();
-
-        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
-
-        /* Iterate, mapping each block. */
-        for (size_t i = 0; i < num_pages; i += L1BlockSize / PageSize) {
-            /* Map the block. */
-            *impl.GetL1Entry(virt_addr) = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
-            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-            virt_addr += L1BlockSize;
-            phys_addr += L1BlockSize;
-        }
-
-        R_SUCCEED();
-    }
-
-    Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
-        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
-        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L2BlockSize));
-        MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L2BlockSize));
-
-        auto &impl = this->GetImpl();
-        KVirtualAddress l2_virt = Null<KVirtualAddress>;
-        int l2_open_count = 0;
-
-        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
-
-        /* Iterate, mapping each block. */
-        for (size_t i = 0; i < num_pages; i += L2BlockSize / PageSize) {
-            KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
-
-            /* If we have no L2 table, we should get or allocate one. */
-            if (l2_virt == Null<KVirtualAddress>) {
-                if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
-                    /* Allocate table. */
-                    l2_virt = AllocatePageTable(page_list, reuse_ll);
-                    R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());
-
-                    /* Set the entry. */
-                    l2_phys = GetPageTablePhysicalAddress(l2_virt);
-                    PteDataMemoryBarrier();
-                    *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
-                } else {
-                    l2_virt = GetPageTableVirtualAddress(l2_phys);
-                }
-            }
-            MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);
-
-            /* Map the block. */
-            *impl.GetL2EntryFromTable(l2_virt, virt_addr) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
-            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-            l2_open_count++;
-            virt_addr += L2BlockSize;
-            phys_addr += L2BlockSize;
-
-            /* Account for hitting end of table. */
-            if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
-                if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
-                    this->GetPageTableManager().Open(l2_virt, l2_open_count);
-                }
-                l2_virt = Null<KVirtualAddress>;
-                l2_open_count = 0;
-            }
-        }
-
-        /* Perform any remaining opens. */
-        if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
-            this->GetPageTableManager().Open(l2_virt, l2_open_count);
-        }
-
-        R_SUCCEED();
-    }
-
-    Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
-        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
-        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
-
-        auto &impl = this->GetImpl();
-        KVirtualAddress l2_virt = Null<KVirtualAddress>;
-        KVirtualAddress l3_virt = Null<KVirtualAddress>;
-        int l2_open_count = 0;
-        int l3_open_count = 0;
-
-        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
-
-        /* Iterate, mapping each page. */
-        for (size_t i = 0; i < num_pages; i++) {
-            KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
-            bool l2_allocated = false;
-
-            /* If we have no L3 table, we should get or allocate one. */
-            if (l3_virt == Null<KVirtualAddress>) {
-                KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
-
-                /* If we have no L2 table, we should get or allocate one. */
-                if (l2_virt == Null<KVirtualAddress>) {
-                    if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
-                        /* Allocate table. */
-                        l2_virt = AllocatePageTable(page_list, reuse_ll);
-                        R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());
-
-                        /* Set the entry. */
-                        l2_phys = GetPageTablePhysicalAddress(l2_virt);
-                        PteDataMemoryBarrier();
-                        *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
-                        l2_allocated = true;
-                    } else {
-                        l2_virt = GetPageTableVirtualAddress(l2_phys);
-                    }
-                }
-                MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);
-
-                if (L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); !l2_entry->GetTable(l3_phys)) {
-                    /* Allocate table. */
-                    l3_virt = AllocatePageTable(page_list, reuse_ll);
-                    if (l3_virt == Null<KVirtualAddress>) {
-                        /* Cleanup the L2 entry. */
-                        if (l2_allocated) {
-                            *impl.GetL1Entry(virt_addr) = InvalidL1PageTableEntry;
-                            this->NoteUpdated();
-                            FreePageTable(page_list, l2_virt);
-                        } else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
-                            this->GetPageTableManager().Open(l2_virt, l2_open_count);
-                        }
-
-                        R_THROW(svc::ResultOutOfResource());
-                    }
-
-                    /* Set the entry. */
-                    l3_phys = GetPageTablePhysicalAddress(l3_virt);
-                    PteDataMemoryBarrier();
-                    *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
-                    l2_open_count++;
-                } else {
-                    l3_virt = GetPageTableVirtualAddress(l3_phys);
-                }
-            }
-            MESOSPHERE_ASSERT(l3_virt != Null<KVirtualAddress>);
-
-            /* Map the page. */
-            *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
-            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
-            l3_open_count++;
-            virt_addr += PageSize;
-            phys_addr += PageSize;
-
-            /* Account for hitting end of table. */
-            if (util::IsAligned(GetInteger(virt_addr), L2BlockSize)) {
-                if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
-                    this->GetPageTableManager().Open(l3_virt, l3_open_count);
-                }
-                l3_virt = Null<KVirtualAddress>;
-                l3_open_count = 0;
-
-                if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
-                    if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
-                        this->GetPageTableManager().Open(l2_virt, l2_open_count);
-                    }
-                    l2_virt = Null<KVirtualAddress>;
-                    l2_open_count = 0;
-                }
-            }
-        }
-
-        /* Perform any remaining opens. */
-        if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
-            this->GetPageTableManager().Open(l2_virt, l2_open_count);
-        }
-        if (l3_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
-            this->GetPageTableManager().Open(l3_virt, l3_open_count);
-        }
-
-        R_SUCCEED();
-    }
-
     Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
 
+        /* Ensure there are no pending data writes. */
+        cpu::DataSynchronizationBarrier();
+
         auto &impl = this->GetImpl();
 
         /* If we're not forcing an unmap, separate pages immediately. */
@@ -632,6 +449,126 @@ namespace ams::kern::arch::arm64 {
         R_SUCCEED();
     }
 
+    Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); */
+        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); */
+
+        auto &impl = this->GetImpl();
+        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
+
+        /* Begin traversal. */
+        TraversalContext context;
+        TraversalEntry entry;
+        bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr);
+
+        /* Iterate, mapping each page. */
+        while (num_pages > 0) {
+            /* If we're mapping at the address, there must be nothing there. */
+            MESOSPHERE_ABORT_UNLESS(!valid);
+
+            /* If we fail, clean up any empty tables we may have allocated. */
+            ON_RESULT_FAILURE {
+                /* Remove entries for and free any tables. */
+                while (context.level < KPageTableImpl::EntryLevel_L1) {
+                    /* If the higher-level table has entries, we don't need to do a free. */
+                    if (context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+                        break;
+                    }
+
+                    /* If there's no table, we also don't need to do a free. */
+                    const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), PageSize));
+                    if (table == Null<KVirtualAddress>) {
+                        break;
+                    }
+
+                    /* Clear the entry for the table we're removing. */
+                    *context.level_entries[context.level + 1] = InvalidPageTableEntry;
+
+                    /* Remove the entry for the table one level higher. */
+                    if (context.level + 1 < KPageTableImpl::EntryLevel_L1) {
+                        context.level_entries[context.level + 2]->RemoveTableEntries(1);
+                    }
+
+                    /* Advance our level. */
+                    context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
+
+                    /* Note that we performed an update and free the table. */
+                    this->NoteUpdated();
+                    this->FreePageTable(page_list, table);
+                }
+            };
+
+            /* If necessary, allocate page tables for the entry. */
+            size_t mapping_size = entry.block_size;
+            while (mapping_size > page_size) {
+                /* Allocate the table. */
+                const auto table = AllocatePageTable(page_list, reuse_ll);
+                R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());
+
+                /* Wait for pending stores to complete. */
+                cpu::DataSynchronizationBarrierInnerShareableStore();
+
+                /* Update the block entry to be a table entry. */
+                *context.level_entries[context.level] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(table), this->IsKernel(), true, 0);
+
+                /* Add the entry to the table containing this one. */
+                if (context.level != KPageTableImpl::EntryLevel_L1) {
+                    context.level_entries[context.level + 1]->AddTableEntries(1);
+                }
+
+                /* Decrease our level. */
+                context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) - 1);
+
+                /* Add our new entry to the context. */
+                context.level_entries[context.level] = GetPointer<PageTableEntry>(table) + impl.GetLevelIndex(virt_addr, context.level);
+
+                /* Update our mapping size. */
+                mapping_size = impl.GetBlockSize(context.level);
+            }
+
+            /* Determine how many pages we can set up on this iteration. */
+            const size_t block_size = impl.GetBlockSize(context.level);
+            const size_t max_ptes   = (context.level == KPageTableImpl::EntryLevel_L1 ? impl.GetNumL1Entries() : BlocksPerTable) - ((reinterpret_cast<uintptr_t>(context.level_entries[context.level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1));
+            const size_t max_pages  = (block_size * max_ptes) / PageSize;
+
+            const size_t cur_pages = std::min(max_pages, num_pages);
+
+            /* Determine the new base attribute. */
+            const bool contig = page_size >= BlocksPerContiguousBlock * mapping_size;
+
+            const size_t num_ptes = cur_pages / (block_size / PageSize);
+            auto *pte = context.level_entries[context.level];
+            for (size_t i = 0; i < num_ptes; ++i) {
+                *pte = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + i * block_size, entry_template, sw_reserved_bits, contig, context.level == KPageTableImpl::EntryLevel_L3);
+                sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
+            }
+
+            /* Add the entries to the table containing this one. */
+            if (context.level != KPageTableImpl::EntryLevel_L1) {
+                context.level_entries[context.level + 1]->AddTableEntries(num_ptes);
+            }
+
+            /* Update our context. */
+            context.is_contiguous = contig;
+            context.level_entries[context.level] = pte + num_ptes - (contig ? BlocksPerContiguousBlock : 1);
+
+            /* Advance our addresses. */
+            phys_addr += cur_pages * PageSize;
+            virt_addr += cur_pages * PageSize;
+            num_pages -= cur_pages;
+
+            /* Continue traversal. */
+            valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
+        }
+
+        /* We mapped, so wait for our writes to take. */
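The unified Map above replaces the three MapL*Blocks helpers with one loop that allocates intermediate tables on demand and then writes as many same-sized entries as the current table and remaining page count allow. A usage sketch (parameter values assumed; as I read it, page_size bounds the largest block the call may install at once):

    /* Map 0x40 pages with 4KiB granularity; nothing larger than page_size */
    /* (here PageSize) will be written as a single block. */
    R_TRY(this->Map(virt_addr, phys_addr, 0x40, entry_template, disable_head_merge, PageSize, page_list, reuse_ll));

The ON_RESULT_FAILURE scope is what keeps a mid-loop allocation failure from leaking: any table that ended up empty is unlinked from its parent and returned to the page list before the error propagates.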
+        cpu::DataSynchronizationBarrierInnerShareableStore();
+
+        R_SUCCEED();
+
+    }
+
     Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
 
From ecff16fb3c825377b2d9cc1b141fd220b7b4aa71 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 16:10:18 -0700
Subject: [PATCH 31/35] kern: first round of page table refactor bug fixes

---
 .../libmesosphere/source/arch/arm64/kern_k_page_table.cpp      | 4 ++--
 .../source/arch/arm64/kern_k_page_table_impl.cpp               | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index cbd586289b..d488a11520 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -540,7 +540,7 @@ namespace ams::kern::arch::arm64 {
             const size_t num_ptes = cur_pages / (block_size / PageSize);
             auto *pte = context.level_entries[context.level];
             for (size_t i = 0; i < num_ptes; ++i) {
-                *pte = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + i * block_size, entry_template, sw_reserved_bits, contig, context.level == KPageTableImpl::EntryLevel_L3);
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + i * block_size, entry_template, sw_reserved_bits, contig, context.level == KPageTableImpl::EntryLevel_L3);
                 sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
             }
 
@@ -803,7 +803,7 @@ namespace ams::kern::arch::arm64 {
             }
 
             /* Separate. */
-            impl.SeparatePages(entry, context, virt_addr, nullptr);
+            impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table));
             this->NoteUpdated();
         }
 
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 249024a290..3a9c1275b3 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -261,7 +261,7 @@ namespace ams::kern::arch::arm64 {
             auto * const first = context->level_entries[context->level];
             const KPhysicalAddress block = this->GetBlock(first, context->level);
             for (size_t i = 0; i < BlocksPerTable; ++i) {
-                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level == EntryLevel_L3);
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level - 1 == EntryLevel_L3);
             }
 
             context->is_contiguous = true;
             context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
 
             /* Wait for pending stores to complete. */
             cpu::DataSynchronizationBarrierInnerShareableStore();
 
             /* Update the block entry to be a table entry. */
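The first cpp hunk in this bug-fix patch is a classic indexing slip, isolated below (sketch; MakeEntryForIndex stands in for the PageTableEntry construction above): every iteration previously rewrote entry zero instead of advancing through the run, leaving entries 1..n-1 stale.

    for (size_t i = 0; i < num_ptes; ++i) {
        pte[i] = MakeEntryForIndex(i); /* fixed: was `*pte = ...`, which only ever wrote pte[0] */
    }

The companion impl.cpp fix is the same family of off-by-one: after a block-to-table separation the new entries live one level lower, so the page-flag check must test `context->level - 1`, not `context->level`.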
-            *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(first)), m_is_kernel, true, BlocksPerTable);
+            *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable);
+
             context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
         }
 
From 5c7122d0f36d2cbc1f952fbc7d558d3598a3e442 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 18:04:54 -0700
Subject: [PATCH 32/35] kern: fix more page table refactor bugs

---
 .../arch/arm64/kern_k_page_table_impl.hpp     |  8 ++++++
 .../source/arch/arm64/kern_k_page_table.cpp   | 27 ++++++++++++++-----
 .../arch/arm64/kern_k_page_table_impl.cpp     |  4 +--
 3 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
index 57159c5273..e8965c4d86 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
@@ -146,6 +146,14 @@ namespace ams::kern::arch::arm64 {
             static bool MergePages(KVirtualAddress *out, TraversalContext *context);
 
             void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
+
+            KProcessAddress GetAddressForContext(const TraversalContext *context) const {
+                KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;
+                for (u32 level = context->level; level <= EntryLevel_L1; ++level) {
+                    addr += ((reinterpret_cast<uintptr_t>(context->level_entries[level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1)) << (PageBits + LevelBits * level);
+                }
+                return addr;
+            }
     };
 
 }
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index d488a11520..39efc0b5ff 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -226,6 +226,9 @@ namespace ams::kern::arch::arm64 {
                         /* If we cleared a table, we need to note that we updated and free the table. */
                         if (freeing_table) {
                             KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                            if (table == Null<KVirtualAddress>) {
+                                break;
+                            }
                             ClearPageTable(table);
                             this->GetPageTableManager().Free(table);
                         }
@@ -243,11 +246,14 @@ namespace ams::kern::arch::arm64 {
                         context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
                         freeing_table = true;
                     }
                 }
 
                 /* Continue the traversal. */
                 cur_valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
+
+                if (entry.block_size == 0) {
+                    break;
+                }
             }
 
             /* Free any remaining pages. */
             if (cur_size > 0) {
                 mm.Close(cur_phys_addr, cur_size / PageSize);
             }
         }
@@ -266,7 +272,6 @@ namespace ams::kern::arch::arm64 {
             KPageTableBase::Finalize();
         }
 
-
         R_SUCCEED();
     }
 
@@ -379,6 +384,7 @@ namespace ams::kern::arch::arm64 {
             /* Unmap the block. */
             bool freeing_table = false;
+            bool need_recalculate_virt_addr = false;
             while (true) {
                 /* Clear the entries. */
                 const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
                 auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
                 for (size_t i = 0; i < num_to_clear; ++i) {
                     pte[i] = InvalidPageTableEntry;
                 }
 
                 /* Remove the entries from the previous table. */
                 if (context.level != KPageTableImpl::EntryLevel_L1) {
                     context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
                 }
 
                 /* If we cleared a table, we need to note that we updated and free the table. */
                 if (freeing_table) {
+                    /* If there's no table, we also don't need to do a free. */
+                    const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                    if (table == Null<KVirtualAddress>) {
+                        break;
+                    }
                     this->NoteUpdated();
-                    this->FreePageTable(page_list, KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize)));
+                    this->FreePageTable(page_list, table);
+                    need_recalculate_virt_addr = true;
                 }

                 /* Advance; we're no longer contiguous. */
@@ -424,9 +436,10 @@ namespace ams::kern::arch::arm64 {

             /* Advance. */
             size_t freed_size = next_entry.block_size;
-            if (freeing_table) {
+            if (need_recalculate_virt_addr) {
                 /* We advanced more than by the block, so we need to calculate the actual advanced size. */
-                const KProcessAddress new_virt_addr = util::AlignUp(GetInteger(virt_addr), impl.GetBlockSize(context.level, context.is_contiguous));
+                const size_t block_size = impl.GetBlockSize(context.level, context.is_contiguous);
+                const KProcessAddress new_virt_addr = util::AlignDown(GetInteger(impl.GetAddressForContext(std::addressof(context))) + block_size, block_size);
                 MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size);

                 freed_size = std::min(new_virt_addr - virt_addr, remaining_pages * PageSize);
@@ -451,8 +464,8 @@ namespace ams::kern::arch::arm64 {
     Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); */
-        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); */
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));

         auto &impl = this->GetImpl();
         u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);

diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 3a9c1275b3..bddf5323f9 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64 {

             /* We want to upgrade a contiguous mapping in a table to a block. */
             PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
-            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+            const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(static_cast<EntryLevel>(context->level + 1), false));

             /* First, check that all entries are valid for us to merge. */
             const u64 entry_template = pte->GetEntryTemplateForMerge();
@@ -208,7 +208,7 @@ namespace ams::kern::arch::arm64 {
             } else {
                 /* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
                 PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
-                const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+                const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(context->level, true));

                 /* First, check that all entries are valid for us to merge. */
                 const u64 entry_template = pte->GetEntryTemplateForMerge();
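The new `GetAddressForContext` helper in this patch recovers the virtual address a traversal context currently points at: each level's entry pointer, taken modulo its 512-entry table, yields that level's index, which is shifted into the address at `PageBits + LevelBits * level`. A simplified model of that index arithmetic (assuming a 4KiB granule and 9 bits per level, and omitting the kernel-space offset the real helper derives from `m_is_kernel`):

    #include <cstdint>
    #include <cstdio>

    constexpr unsigned PageBits  = 12; /* 4 KiB pages           */
    constexpr unsigned LevelBits = 9;  /* 512 entries per table */

    /* Levels are numbered as in the kernel: 0 = L3 (finest), 2 = L1. */
    uint64_t AddressFromIndices(const unsigned (&indices)[3], unsigned start_level) {
        uint64_t addr = 0;
        for (unsigned level = start_level; level <= 2; ++level) {
            addr += static_cast<uint64_t>(indices[level]) << (PageBits + LevelBits * level);
        }
        return addr;
    }

    int main() {
        const unsigned indices[3] = { 5, 7, 1 }; /* L3 = 5, L2 = 7, L1 = 1 */
        /* (1 << 30) + (7 << 21) + (5 << 12) == 0x40E05000 */
        std::printf("%#llx\n", static_cast<unsigned long long>(AddressFromIndices(indices, 0)));
        return 0;
    }

This is what lets the unmap path recompute how far it actually advanced after freeing whole tables, rather than aligning the stale `virt_addr` upward as the removed code did.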
From c06a4c696d90963256d844dd2e46a0b0391a8045 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 19:14:07 -0700
Subject: [PATCH 33/35] kern: Perform page table validity pass during
 KPageTableImpl::InitializeForKernel

---
 .../arm64/init/kern_k_init_page_table.hpp       | 104 +++++++++---------
 .../arch/arm64/kern_k_page_table_entry.hpp      |  22 ++--
 .../source/arch/arm64/kern_k_page_table.cpp     |  16 +--
 .../arch/arm64/kern_k_page_table_impl.cpp       |  54 +++++++++
 4 files changed, 127 insertions(+), 69 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
index 8b9a6953c5..805b967db1 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
@@ -110,47 +110,47 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
-                if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
+                if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
                     virt_addr += L1BlockSize;

-                    if (l1_entry->IsBlock() && block_size == L1BlockSize) {
+                    if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
                         count++;
                     }
                     continue;
                 }

                 /* Non empty and non-block must be table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());

                 /* Table, so check if we're mapped in L2. */
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);

-                if (l2_entry->IsBlock() || l2_entry->IsEmpty()) {
-                    const size_t advance_size = (l2_entry->IsBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
+                if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
+                    const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
                     virt_addr += advance_size;

-                    if (l2_entry->IsBlock() && block_size == advance_size) {
+                    if (l2_entry->IsMappedBlock() && block_size == advance_size) {
                         count++;
                     }
                     continue;
                 }

                 /* Non empty and non-block must be table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());

                 /* Table, so check if we're mapped in L3. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

                 /* L3 must be block or empty. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock() || l3_entry->IsEmpty());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());

-                const size_t advance_size = (l3_entry->IsBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
+                const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
                 MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
                 virt_addr += advance_size;

-                if (l3_entry->IsBlock() && block_size == advance_size) {
+                if (l3_entry->IsMappedBlock() && block_size == advance_size) {
                     count++;
                 }
             }
@@ -164,10 +164,10 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
-                if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
+                if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
-                    if (l1_entry->IsBlock() && block_size == L1BlockSize) {
+                    if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
                         if ((count++) == index) {
                             return virt_addr;
                         }
@@ -177,16 +177,16 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* Non empty and non-block must be table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());

                 /* Table, so check if we're mapped in L2. */
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);

-                if (l2_entry->IsBlock() || l2_entry->IsEmpty()) {
-                    const size_t advance_size = (l2_entry->IsBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
+                if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
+                    const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
-                    if (l2_entry->IsBlock() && block_size == advance_size) {
+                    if (l2_entry->IsMappedBlock() && block_size == advance_size) {
                         if ((count++) == index) {
                             return virt_addr;
                         }
@@ -196,18 +196,18 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* Non empty and non-block must be table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());

                 /* Table, so check if we're mapped in L3. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

                 /* L3 must be block or empty. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock() || l3_entry->IsEmpty());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());

-                const size_t advance_size = (l3_entry->IsBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
+                const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
                 MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
-                if (l3_entry->IsBlock() && block_size == advance_size) {
+                if (l3_entry->IsMappedBlock() && block_size == advance_size) {
                     if ((count++) == index) {
                         return virt_addr;
                     }
@@ -220,29 +220,29 @@ namespace ams::kern::arch::arm64::init {
             PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

-                if (l1_entry->IsBlock()) {
+                if (l1_entry->IsMappedBlock()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
                     return l1_entry;
                 }

-                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());

                 /* Table, so check if we're mapped in L2. */
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);

-                if (l2_entry->IsBlock()) {
+                if (l2_entry->IsMappedBlock()) {
                     const size_t real_size = (l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
                     MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
                     return l2_entry;
                 }

-                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());

                 /* Table, so check if we're mapped in L3. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

                 /* L3 must be block. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());

                 const size_t real_size = (l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
                 MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
@@ -340,7 +340,7 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* If we don't already have an L2 table, we need to make a new one. */
-                if (!l1_entry->IsTable()) {
+                if (!l1_entry->IsMappedTable()) {
                     KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
                     cpu::DataSynchronizationBarrierInnerShareable();
                     *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
@@ -371,7 +371,7 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* If we don't already have an L3 table, we need to make a new one. */
-                if (!l2_entry->IsTable()) {
+                if (!l2_entry->IsMappedTable()) {
                     KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
                     cpu::DataSynchronizationBarrierInnerShareable();
                     *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
@@ -416,12 +416,12 @@ namespace ams::kern::arch::arm64::init {
                 for (size_t l1_index = 0; l1_index < m_num_entries[0]; l1_index++) {
                     /* Get L1 entry. */
                     L1PageTableEntry * const l1_entry = l1_table + l1_index;
-                    if (l1_entry->IsBlock()) {
+                    if (l1_entry->IsMappedBlock()) {
                         /* Unmap the L1 entry, if we should. */
                         if (ShouldUnmap(l1_entry)) {
                             *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
                         }
-                    } else if (l1_entry->IsTable()) {
+                    } else if (l1_entry->IsMappedTable()) {
                         /* Get the L2 table. */
                         L2PageTableEntry * const l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(l1_entry->GetTable()) + phys_to_virt_offset);

@@ -430,7 +430,7 @@ namespace ams::kern::arch::arm64::init {
                         for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
                             /* Get L2 entry. */
                             L2PageTableEntry * const l2_entry = l2_table + l2_index;
-                            if (l2_entry->IsBlock()) {
+                            if (l2_entry->IsMappedBlock()) {
                                 const size_t num_to_clear = (l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize) / L2BlockSize;
                                 if (ShouldUnmap(l2_entry)) {
@@ -442,7 +442,7 @@ namespace ams::kern::arch::arm64::init {
                                 }

                                 l2_index = l2_index + num_to_clear - 1;
-                            } else if (l2_entry->IsTable()) {
+                            } else if (l2_entry->IsMappedTable()) {
                                 /* Get the L3 table. */
                                 L3PageTableEntry * const l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(l2_entry->GetTable()) + phys_to_virt_offset);
@@ -450,7 +450,7 @@ namespace ams::kern::arch::arm64::init {
                                 size_t remaining_l3_entries = 0;
                                 for (size_t l3_index = 0; l3_index < MaxPageTableEntries; ++l3_index) {
                                     /* Get L3 entry. */
-                                    if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsBlock()) {
+                                    if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsMappedBlock()) {
                                         const size_t num_to_clear = (l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize) / L3BlockSize;

                                         if (ShouldUnmap(l3_entry)) {
@@ -498,25 +498,25 @@ namespace ams::kern::arch::arm64::init {
                 /* Get the L1 entry. */
                 const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

-                if (l1_entry->IsBlock()) {
+                if (l1_entry->IsMappedBlock()) {
                     return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
                 }

-                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());

                 /* Get the L2 entry. */
                 const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);

-                if (l2_entry->IsBlock()) {
+                if (l2_entry->IsMappedBlock()) {
                     return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
                 }

-                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());

                 /* Get the L3 entry. */
                 const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());

                 return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
             }
@@ -561,26 +561,26 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* If an L1 block is mapped, update. */
-                if (l1_entry->IsBlock()) {
+                if (l1_entry->IsMappedBlock()) {
                     UpdateExtents(l1_entry->GetBlock(), L1BlockSize);
                     continue;
                 }

                 /* Not a block, so we must have a table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());

                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
-                if (l2_entry->IsBlock()) {
+                if (l2_entry->IsMappedBlock()) {
                     UpdateExtents(l2_entry->GetBlock(), l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize);
                     continue;
                 }

                 /* Not a block, so we must have a table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());

                 /* We must have a mapped l3 entry to inspect. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());

                 UpdateExtents(l3_entry->GetBlock(), l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize);
             }
@@ -602,11 +602,11 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* If an L1 block is mapped, the address isn't free. */
-                if (l1_entry->IsBlock()) {
+                if (l1_entry->IsMappedBlock()) {
                     return false;
                 }

-                if (!l1_entry->IsTable()) {
+                if (!l1_entry->IsMappedTable()) {
                     /* Not a table, so just move to check the next region. */
                     virt_addr = util::AlignDown(GetInteger(virt_addr) + L1BlockSize, L1BlockSize);
                     continue;
                 }
@@ -615,11 +615,11 @@ namespace ams::kern::arch::arm64::init {

                 /* Table, so check if we're mapped in L2. */
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);

-                if (l2_entry->IsBlock()) {
+                if (l2_entry->IsMappedBlock()) {
                     return false;
                 }

-                if (!l2_entry->IsTable()) {
+                if (!l2_entry->IsMappedTable()) {
                     /* Not a table, so just move to check the next region. */
                     virt_addr = util::AlignDown(GetInteger(virt_addr) + L2BlockSize, L2BlockSize);
                     continue;
                 }
@@ -628,7 +628,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Table, so check if we're mapped in L3. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

-                if (l3_entry->IsBlock()) {
+                if (l3_entry->IsMappedBlock()) {
                     return false;
                 }
@@ -648,7 +648,7 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* Check if an L1 block is present. */
-                if (l1_entry->IsBlock()) {
+                if (l1_entry->IsMappedBlock()) {
                     /* Ensure that we are allowed to have an L1 block here. */
                     const KPhysicalAddress block = l1_entry->GetBlock();
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
@@ -669,10 +669,10 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* Not a block, so we must be a table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());

                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
-                if (l2_entry->IsBlock()) {
+                if (l2_entry->IsMappedBlock()) {
                     const KPhysicalAddress block = l2_entry->GetBlock();

                     if (l2_entry->IsContiguous()) {
@@ -720,11 +720,11 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* Not a block, so we must be a table. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());

                 /* We must have a mapped l3 entry to reprotect. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());

                 const KPhysicalAddress block = l3_entry->GetBlock();
                 if (l3_entry->IsContiguous()) {

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
index d3ac790894..6fce69f816 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
@@ -128,8 +128,8 @@ namespace ams::kern::arch::arm64 {
             }

             /* Construct a table. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t num_blocks)
-                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (num_blocks << 2) | 0x3)
+            constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t ref_count)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (ref_count << 2) | 0x3)
             {
                 /* ... */
             }
@@ -203,6 +203,7 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
                 return this->SelectBits(12, 36);
             }
+            constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; }
             constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
             constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
@@ -217,11 +218,13 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
             constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped) == (1 << 0)); this->SetBit(0, m); return *this; }

-            constexpr ALWAYS_INLINE size_t GetTableNumEntries() const { return this->GetBits(2, 10); }
-            constexpr ALWAYS_INLINE decltype(auto) SetTableNumEntries(size_t num) { this->SetBits(2, 10, num); }
+            constexpr ALWAYS_INLINE size_t GetTableReferenceCount() const { return this->GetBits(2, 10); }
+            constexpr ALWAYS_INLINE decltype(auto) SetTableReferenceCount(size_t num) { this->SetBits(2, 10, num); return *this; }

-            constexpr ALWAYS_INLINE decltype(auto) AddTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() + num); }
-            constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); }
+            constexpr ALWAYS_INLINE decltype(auto) OpenTableReferences(size_t num) { MESOSPHERE_ASSERT(this->GetTableReferenceCount() + num <= BlocksPerTable + 1); return this->SetTableReferenceCount(this->GetTableReferenceCount() + num); }
+            constexpr ALWAYS_INLINE decltype(auto) CloseTableReferences(size_t num) { MESOSPHERE_ASSERT(this->GetTableReferenceCount() >= num); return this->SetTableReferenceCount(this->GetTableReferenceCount() - num); }
+
+            constexpr ALWAYS_INLINE decltype(auto) SetValid() { MESOSPHERE_ASSERT((m_attributes & ExtensionFlag_Valid) == 0); m_attributes |= ExtensionFlag_Valid; return *this; }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
                 constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
@@ -301,7 +304,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr explicit ALWAYS_INLINE L1PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
             {
                 /* ... */
             }
@@ -363,7 +366,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr explicit ALWAYS_INLINE L2PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
             {
                 /* ... */
             }
@@ -428,12 +431,13 @@ namespace ams::kern::arch::arm64 {
             constexpr explicit ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
             constexpr explicit ALWAYS_INLINE L3PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | static_cast<u64>(ExtensionFlag_TestTableMask))
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
             {
                 /* ... */
             }

             constexpr ALWAYS_INLINE bool IsBlock() const { return (GetRawAttributes() & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
+            constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 3; }

             constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                 return this->SelectBits(12, 36);

diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 39efc0b5ff..d13f819795 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -220,7 +220,7 @@ namespace ams::kern::arch::arm64 {
             /* Remove the entries from the previous table. */
             if (context.level != KPageTableImpl::EntryLevel_L1) {
-                context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
+                context.level_entries[context.level + 1]->CloseTableReferences(num_to_clear);
             }

             /* If we cleared a table, we need to note that we updated and free the table. */
@@ -238,7 +238,7 @@ namespace ams::kern::arch::arm64 {
             context.level_entries[context.level] = pte + num_to_clear - 1;

             /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
-            if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+            if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
                 break;
             }
@@ -395,7 +395,7 @@ namespace ams::kern::arch::arm64 {
             /* Remove the entries from the previous table. */
             if (context.level != KPageTableImpl::EntryLevel_L1) {
-                context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
+                context.level_entries[context.level + 1]->CloseTableReferences(num_to_clear);
             }

             /* If we cleared a table, we need to note that we updated and free the table. */
@@ -415,7 +415,7 @@ namespace ams::kern::arch::arm64 {
             context.level_entries[context.level] = pte + num_to_clear - 1;

             /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
-            if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+            if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
                 break;
             }
@@ -485,7 +485,7 @@ namespace ams::kern::arch::arm64 {
             /* Remove entries for and free any tables. */
             while (context.level < KPageTableImpl::EntryLevel_L1) {
                 /* If the higher-level table has entries, we don't need to do a free. */
-                if (context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+                if (context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
                     break;
                 }
@@ -500,7 +500,7 @@ namespace ams::kern::arch::arm64 {
                 /* Remove the entry for the table one level higher. */
                 if (context.level + 1 < KPageTableImpl::EntryLevel_L1) {
-                    context.level_entries[context.level + 2]->RemoveTableEntries(1);
+                    context.level_entries[context.level + 2]->CloseTableReferences(1);
                 }

                 /* Advance our level. */
@@ -527,7 +527,7 @@ namespace ams::kern::arch::arm64 {

             /* Add the entry to the table containing this one. */
             if (context.level != KPageTableImpl::EntryLevel_L1) {
-                context.level_entries[context.level + 1]->AddTableEntries(1);
+                context.level_entries[context.level + 1]->OpenTableReferences(1);
             }

             /* Decrease our level. */
@@ -559,7 +559,7 @@ namespace ams::kern::arch::arm64 {

             /* Add the entries to the table containing this one. */
             if (context.level != KPageTableImpl::EntryLevel_L1) {
-                context.level_entries[context.level + 1]->AddTableEntries(num_ptes);
+                context.level_entries[context.level + 1]->OpenTableReferences(num_ptes);
             }

             /* Update our context. */

diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index bddf5323f9..32dfb00e53 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -27,6 +27,60 @@ namespace ams::kern::arch::arm64 {
         m_table       = static_cast<PageTableEntry *>(tb);
         m_is_kernel   = false;
         m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
+
+        /* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
+        PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
+        u32 level = EntryLevel_L1;
+        while (level != EntryLevel_L1 || (level_entries[EntryLevel_L1] - static_cast<PageTableEntry *>(m_table)) < m_num_entries) {
+            /* Get the pte; it must never have the validity-extension flag set. */
+            auto *pte = level_entries[level];
+            MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
+
+            /* While we're a table, recurse, fixing up the reference counts. */
+            while (level > EntryLevel_L3 && pte->IsMappedTable()) {
+                /* Count how many references are in the table. */
+                auto *table = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(pte->GetTable()));
+
+                size_t ref_count = 0;
+                for (size_t i = 0; i < BlocksPerTable; ++i) {
+                    if (table[i].IsMapped()) {
+                        ++ref_count;
+                    }
+                }
+
+                /* Set the reference count for our new page, adding one additional uncloseable reference; kernel pages must never be unreferenced. */
+                pte->SetTableReferenceCount(ref_count + 1).SetValid();
+
+                /* Iterate downwards. */
+                level -= 1;
+                level_entries[level] = table;
+                pte = level_entries[level];
+
+                /* Check that the entry isn't unexpected. */
+                MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
+            }
+
+            /* We're dealing with some block. If it's mapped, set it valid. */
+            if (pte->IsMapped()) {
+                pte->SetValid();
+            }
+
+            /* Advance. */
+            while (true) {
+                /* Advance to the next entry at the current level. */
+                ++level_entries[level];
+                if (!util::IsAligned(reinterpret_cast<uintptr_t>(level_entries[level]), PageSize)) {
+                    break;
+                }

+                /* If we're at the end of a level, advance upwards. */
+                level_entries[level++] = nullptr;
+
+                if (level > EntryLevel_L1) {
+                    return;
+                }
+            }
+        }
     }

     L1PageTableEntry *KPageTableImpl::Finalize() {
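A note on the `IsBlock`/`IsTable` to `IsMappedBlock`/`IsMappedTable` renames running through this patch: with the validity pass, the kernel distinguishes descriptors that are valid to hardware from entries it merely tracks in software, so the new predicates test the architectural low bits directly. Per the ARMv8 VMSA, bits[1:0] of an L1/L2 descriptor are 0b01 for a block and 0b11 for a table, while an L3 page descriptor uses 0b11 (matching the `GetBits(0, 2) == 1` and `== 3` tests added above). A toy decoder for those low bits (illustrative only, not mesosphere code):

    #include <cstdint>
    #include <cstdio>

    enum class DescType { Invalid, Block, Table };

    /* Decode an L1/L2 descriptor: bit 0 is the valid bit; bit 1 selects table vs block. */
    DescType DecodeL1L2(uint64_t desc) {
        switch (desc & 3) {
            case 1:  return DescType::Block;
            case 3:  return DescType::Table;
            default: return DescType::Invalid; /* 0b00 and 0b10 are both invalid */
        }
    }

    int main() {
        std::printf("%d %d %d\n",
                    static_cast<int>(DecodeL1L2(0x40000401)),  /* block   */
                    static_cast<int>(DecodeL1L2(0x40000403)),  /* table   */
                    static_cast<int>(DecodeL1L2(0x40000400))); /* invalid */
        return 0;
    }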
From 5717ea6c00f551f13ae34a944ae64a4d15b71db6 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 19:14:22 -0700
Subject: [PATCH 34/35] kern: support reboot to fatal error on mariko

---
 .../source/board/nintendo/nx/kern_k_system_control.cpp | 7 +------
 .../source/board/nintendo/nx/kern_secure_monitor.hpp   | 7 ++++---
 2 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
index 8050f78e0a..b82518e6e2 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
@@ -623,13 +623,8 @@ namespace ams::kern::board::nintendo::nx {
                 for (size_t i = 0; i < RebootPayloadSize / sizeof(u32); ++i) {
                     GetPointer<u32>(iram_address)[i] = GetPointer<u32>(reboot_payload)[i];
                 }
-
-                /* Reboot. */
-                smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToPayload);
-            } else {
-                /* If we don't have a payload, reboot to rcm. */
-                smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToRcm);
             }
+            smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToFatalError);
         }

         if (g_call_smc_on_panic) {

diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp
index 1e64d11f9b..6ee325aa32 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp
@@ -98,9 +98,10 @@ namespace ams::kern::board::nintendo::nx::smc {
     };

     enum UserRebootType {
-        UserRebootType_None      = 0,
-        UserRebootType_ToRcm     = 1,
-        UserRebootType_ToPayload = 2,
+        UserRebootType_None         = 0,
+        UserRebootType_ToRcm        = 1,
+        UserRebootType_ToPayload    = 2,
+        UserRebootType_ToFatalError = 3,
     };

     void GenerateRandomBytes(void *dst, size_t size);
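Before this patch the panic path chose between `UserRebootType_ToPayload` and `UserRebootType_ToRcm` depending on whether a payload was staged; Mariko units cannot expose RCM, so the kernel now stages the payload when one exists and always requests the new `UserRebootType_ToFatalError`, leaving the policy decision to the secure monitor. A condensed sketch of the resulting flow (the helpers and globals here are hypothetical stand-ins, not the real kernel/SMC interface):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    enum UserRebootType : uint32_t {
        UserRebootType_None         = 0,
        UserRebootType_ToRcm        = 1,
        UserRebootType_ToPayload    = 2,
        UserRebootType_ToFatalError = 3,
    };

    static uint8_t g_iram[0x1000];                        /* stand-in for IRAM       */
    static UserRebootType g_config = UserRebootType_None; /* stand-in for SMC config */

    void CopyPayloadToIram(const void *payload, size_t size) {
        std::memcpy(g_iram, payload, size < sizeof(g_iram) ? size : sizeof(g_iram));
    }

    void RequestReboot(const void *payload, size_t payload_size) {
        if (payload != nullptr) {
            CopyPayloadToIram(payload, payload_size); /* stage payload if present */
        }
        g_config = UserRebootType_ToFatalError;       /* always reboot to fatal error now */
    }

    int main() {
        const uint8_t payload[4] = { 1, 2, 3, 4 };
        RequestReboot(payload, sizeof(payload));
        std::printf("config=%u first=%u\n", static_cast<unsigned>(g_config), static_cast<unsigned>(g_iram[0]));
        return 0;
    }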
ParseModuleId("225865A442B4B66E8BD14B3E9450B901BDF29A40"), util::size(Usb30ForceEnablePatches_16_0_0), Usb30ForceEnablePatches_16_0_0 }, /* 16.0.0 */ { ParseModuleId("70D4C2ABCD049F16B301186924367F813DA70248"), util::size(Usb30ForceEnablePatches_17_0_0), Usb30ForceEnablePatches_17_0_0 }, /* 17.0.0 */ { ParseModuleId("4F21AE15E814FA46515C0401BB23D4F7ADCBF3F4"), util::size(Usb30ForceEnablePatches_18_0_0), Usb30ForceEnablePatches_18_0_0 }, /* 18.0.0 */ + { ParseModuleId("54BB9BB32C958E02752DC5E4AE8D016BFE1F5418"), util::size(Usb30ForceEnablePatches_19_0_0), Usb30ForceEnablePatches_19_0_0 }, /* 19.0.0 */ };