Discussion:
[PATCH Makedumpfile 01/10] arm64: cleanup code, comment, blank space, blank lines etc
Pratyush Anand
2016-10-25 07:22:53 UTC
Permalink
Do some code cleanup for blank spaces and lines, static function and
commenting style.
Also, re-organize some #defines to place them in a particular fashion.
Remove redundant #defines.

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index f7540262a6ae..4a2a9a9ec45d 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -35,25 +35,21 @@ typedef struct {
pud_t pud;
} pmd_t;

-#define pud_offset(pgd, vaddr) ((pud_t *)pgd)
+typedef struct {
+ unsigned long pte;
+} pte_t;

#define pgd_val(x) ((x).pgd)
#define pud_val(x) (pgd_val((x).pgd))
#define pmd_val(x) (pud_val((x).pud))
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PUD_SIZE (1UL << PUD_SHIFT)
-
-typedef struct {
- unsigned long pte;
-} pte_t;
#define pte_val(x) ((x).pte)

#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * ARM64_PGTABLE_LEVELS + 3)
+#define PUD_SHIFT PGDIR_SHIFT
+#define PUD_SIZE (1UL << PUD_SHIFT)
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
-#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -62,17 +58,14 @@ typedef struct {

#define PAGE_PRESENT (1 << 0)
#define SECTIONS_SIZE_BITS 30
-/*
-
-* Highest possible physical address supported.
-*/
+/* Highest possible physical address supported */
#define PHYS_MASK_SHIFT 48
#define PHYS_MASK ((1UL << PHYS_MASK_SHIFT) - 1)
/*
* Remove the highest order bits that are not a part of the
* physical address in a section
*/
-#define PMD_SECTION_MASK ((1UL << 40) - 1)
+#define PMD_SECTION_MASK ((1UL << 40) - 1)

#define PMD_TYPE_MASK 3
#define PMD_TYPE_SECT 1
@@ -84,15 +77,13 @@ typedef struct {
#define pgd_index(vaddr) (((vaddr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgdir, vaddr) ((pgd_t *)(pgdir) + pgd_index(vaddr))

-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_index(vaddr) (((vaddr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pmd_page_vaddr(pmd) (__va(pmd_val(pmd) & PHYS_MASK & (int32_t)PAGE_MASK))
#define pte_offset(dir, vaddr) ((pte_t*)pmd_page_vaddr((*dir)) + pte_index(vaddr))

-
-#define pmd_offset_pgtbl_lvl_2(pud, vaddr) ((pmd_t *)pud)
-
#define pmd_index(vaddr) (((vaddr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pud_page_vaddr(pud) (__va(pud_val(pud) & PHYS_MASK & (int32_t)PAGE_MASK))
+#define pmd_offset_pgtbl_lvl_2(pud, vaddr) ((pmd_t *)pud)
#define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_vaddr((*pud)) + pmd_index(vaddr))

/* kernel struct page size can be kernel version dependent, currently
@@ -128,7 +119,7 @@ get_page_shift_arm64(void)
return page_shift;
}

-pmd_t *
+static pmd_t *
pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
{
if (pgtable_level == 2) {
--
2.7.4
Pratyush Anand
2016-10-25 07:22:54 UTC
Permalink
Kernel can also write a hex value in NUMBER(X) string, for example:
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",PHYS_OFFSET);

Therefore, allow read_vmcoreinfo_long() to read 'base 16' values if the 'base
10' read resulted in reading an invalid character.

Signed-off-by: Pratyush Anand <***@redhat.com>
---
makedumpfile.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/makedumpfile.c b/makedumpfile.c
index ab9719bb678e..b2ea3ebdd4cb 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -2433,6 +2433,8 @@ read_vmcoreinfo_long(char *str_structure)
buf[i - 1] = '\0';
if (strncmp(buf, str_structure, strlen(str_structure)) == 0) {
data = strtol(buf + strlen(str_structure), &endp, 10);
+ if (strlen(endp) != 0)
+ data = strtol(buf + strlen(str_structure), &endp, 16);
if ((data == LONG_MAX) || strlen(endp) != 0) {
ERRMSG("Invalid data in %s: %s",
info->name_vmcoreinfo, buf);
--
2.7.4
Pratyush Anand
2016-10-25 07:22:55 UTC
Permalink
Kernel may pass an unsigned number into vmcore. For example ARM64 passes:

vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n", PHYS_OFFSET);

Therefore, introduce read_vmcoreinfo_ulong() to read such values
correctly.

Signed-off-by: Pratyush Anand <***@redhat.com>
---
makedumpfile.c | 34 ++++++++++++++++++++++++++++++++++
makedumpfile.h | 16 ++++++++++++++++
2 files changed, 50 insertions(+)

diff --git a/makedumpfile.c b/makedumpfile.c
index b2ea3ebdd4cb..e248db876858 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -2412,6 +2412,40 @@ read_vmcoreinfo_symbol(char *str_symbol)
return symbol;
}

+unsigned long
+read_vmcoreinfo_ulong(char *str_structure)
+{
+ long data = NOT_FOUND_LONG_VALUE;
+ char buf[BUFSIZE_FGETS], *endp;
+ unsigned int i;
+
+ if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
+ ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
+ info->name_vmcoreinfo, strerror(errno));
+ return INVALID_STRUCTURE_DATA;
+ }
+
+ while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
+ i = strlen(buf);
+ if (!i)
+ break;
+ if (buf[i - 1] == '\n')
+ buf[i - 1] = '\0';
+ if (strncmp(buf, str_structure, strlen(str_structure)) == 0) {
+ data = strtoul(buf + strlen(str_structure), &endp, 10);
+ if (strlen(endp) != 0)
+ data = strtoul(buf + strlen(str_structure), &endp, 16);
+ if ((data == LONG_MAX) || strlen(endp) != 0) {
+ ERRMSG("Invalid data in %s: %s",
+ info->name_vmcoreinfo, buf);
+ return INVALID_STRUCTURE_DATA;
+ }
+ break;
+ }
+ }
+ return data;
+}
+
long
read_vmcoreinfo_long(char *str_structure)
{
diff --git a/makedumpfile.h b/makedumpfile.h
index a5955ff750e5..c5e38a5b6e4b 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -393,6 +393,22 @@ do { \
return FALSE; \
} \
} while (0)
+#define WRITE_NUMBER_UNSIGNED(str_number, number) \
+do { \
+ if (NUMBER(number) != NOT_FOUND_NUMBER) { \
+ fprintf(info->file_vmcoreinfo, "%s%lu\n", \
+ STR_NUMBER(str_number), NUMBER(number)); \
+ } \
+} while (0)
+#define READ_NUMBER_UNSIGNED(str_number, number) \
+do { \
+ if (NUMBER(number) == NOT_FOUND_NUMBER) { \
+ NUMBER(number) = read_vmcoreinfo_ulong(STR_NUMBER(str_number)); \
+ if (NUMBER(number) == INVALID_STRUCTURE_DATA) \
+ return FALSE; \
+ } \
+} while (0)
+

/*
* for source file name
--
2.7.4
Pratyush Anand
2016-10-25 07:22:56 UTC
Permalink
Since the PAGESIZE() and PAGESHIFT() macros use the page size value from
VMCOREINFO_PAGESIZE(), use them instead of hard coding it.

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 24 +++++++-----------------
makedumpfile.h | 1 -
2 files changed, 7 insertions(+), 18 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index 4a2a9a9ec45d..c73efa1aad54 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -44,14 +44,13 @@ typedef struct {
#define pmd_val(x) (pud_val((x).pud))
#define pte_val(x) ((x).pte)

-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * ARM64_PGTABLE_LEVELS + 3)
+#define PAGE_MASK (~(PAGESIZE() - 1))
+#define PGDIR_SHIFT ((PAGESHIFT() - 3) * ARM64_PGTABLE_LEVELS + 3)
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
-#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
-#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
+#define PTRS_PER_PTE (1 << (PAGESHIFT() - 3))
+#define PMD_SHIFT ((PAGESHIFT() - 3) * 2 + 3)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PTRS_PER_PMD PTRS_PER_PTE
@@ -77,7 +76,7 @@ typedef struct {
#define pgd_index(vaddr) (((vaddr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgdir, vaddr) ((pgd_t *)(pgdir) + pgd_index(vaddr))

-#define pte_index(vaddr) (((vaddr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_index(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
#define pmd_page_vaddr(pmd) (__va(pmd_val(pmd) & PHYS_MASK & (int32_t)PAGE_MASK))
#define pte_offset(dir, vaddr) ((pte_t*)pmd_page_vaddr((*dir)) + pte_index(vaddr))

@@ -92,14 +91,13 @@ typedef struct {
#define KERN_STRUCT_PAGE_SIZE get_structure_size("page", DWARF_INFO_GET_STRUCT_SIZE)

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
-#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * KERN_STRUCT_PAGE_SIZE, PUD_SIZE)
+#define PFN_DOWN(x) ((x) >> PAGESHIFT())
+#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGESHIFT())) * KERN_STRUCT_PAGE_SIZE, PUD_SIZE)
#define MODULES_END PAGE_OFFSET
#define MODULES_VADDR (MODULES_END - 0x4000000)

static int pgtable_level;
static int va_bits;
-static int page_shift;

int
get_pgtable_level_arm64(void)
@@ -113,12 +111,6 @@ get_va_bits_arm64(void)
return va_bits;
}

-int
-get_page_shift_arm64(void)
-{
- return page_shift;
-}
-
static pmd_t *
pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
{
@@ -150,11 +142,9 @@ static int calculate_plat_config(void)
if ((stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
pgtable_level = 3;
va_bits = 39;
- page_shift = 12;
} else if ((stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
pgtable_level = 2;
va_bits = 42;
- page_shift = 16;
} else {
ERRMSG("Kernel Configuration not supported\n");
return FALSE;
diff --git a/makedumpfile.h b/makedumpfile.h
index c5e38a5b6e4b..c363c547b448 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -519,7 +519,6 @@ do { \
int get_va_bits_arm64(void);
#define ARM64_PGTABLE_LEVELS get_pgtable_level_arm64()
#define VA_BITS get_va_bits_arm64()
-#define PAGE_SHIFT get_page_shift_arm64()
#define KVBASE VMALLOC_START
#endif /* aarch64 */
--
2.7.4
Pratyush Anand
2016-10-25 07:22:57 UTC
Permalink
arch/arm64/include/asm/memory.h defines PAGE_OFFSET as follows:

#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))

Use same definition in makedumpfile as well.

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index c73efa1aad54..ce5869452484 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -203,8 +203,7 @@ get_machdep_info_arm64(void)
{
info->max_physmem_bits = PHYS_MASK_SHIFT;
info->section_size_bits = SECTIONS_SIZE_BITS;
- info->page_offset = SYMBOL(_stext)
- & (0xffffffffffffffffUL << (VA_BITS - 1));
+ info->page_offset = 0xffffffffffffffffUL << (VA_BITS - 1);
info->vmalloc_start = 0xffffffffffffffffUL << VA_BITS;
info->vmalloc_end = PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - 0x10000;
info->vmemmap_start = VMALLOC_END + 0x10000;
--
2.7.4
Pratyush Anand
2016-10-25 07:22:58 UTC
Permalink
get_phys_base() is not called for re-filtering. Therefore,
calculate_plat_config() is never called if re-filtering is enabled.
Even otherwise, it seems more appropriate to call calculate_plat_config()
from get_machdep_info(). Therefore, move the call to calculate_plat_config()
into get_machdep_info().

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index ce5869452484..a5b1bca43e64 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -171,11 +171,6 @@ get_phys_base_arm64(void)
unsigned long long phys_start;
int i;

- if (!calculate_plat_config()) {
- ERRMSG("Can't determine platform config values\n");
- return FALSE;
- }
-
/*
* We resolve phys_base from PT_LOAD segments. LMA contains physical
* address of the segment, and we use the lowest start as
@@ -201,6 +196,11 @@ get_phys_base_arm64(void)
int
get_machdep_info_arm64(void)
{
+ if (!calculate_plat_config()) {
+ ERRMSG("Can't determine platform config values\n");
+ return FALSE;
+ }
+
info->max_physmem_bits = PHYS_MASK_SHIFT;
info->section_size_bits = SECTIONS_SIZE_BITS;
info->page_offset = 0xffffffffffffffffUL << (VA_BITS - 1);
--
2.7.4
Pratyush Anand
2016-10-25 07:22:59 UTC
Permalink
ARM64 kdump-v26 patch (4/7) [1] has embedded VA_BITS and PHYS_OFFSET into
vmcore. Therefore this patch does following improvements:

1) Read value of VA_BITS from vmcore, instead of hard coding.
2) Remove calculation of PHYS_OFFSET from PT_LOAD and read it from vmcore
as well.
3) arch/arm64/Kconfig defines page table levels on the basis of page size and
VA bits. Use the same definition in makedumpfile as well.
4) Since pgtable_level and va_bits are used only in arm64.c, get rid
of the get_pgtable_level_arm64() and get_va_bits_arm64() functions.
5) Since va_bits and pgtable_levels are local to arch/arm64.c, therefore
remove #define of ARM64_PGTABLE_LEVELS and VA_BITS from makedumpfile.h

[1] https://www.mail-archive.com/***@lists.infradead.org/msg16298.html

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 88 +++++++++++++++++-----------------------------------------
makedumpfile.c | 8 ++++++
makedumpfile.h | 7 +++--
3 files changed, 38 insertions(+), 65 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index a5b1bca43e64..6f61b4c5497f 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -39,16 +39,23 @@ typedef struct {
unsigned long pte;
} pte_t;

+static int pgtable_level;
+static int va_bits;
+
+#define SZ_4K (4 * 1024)
+#define SZ_16K (16 * 1024)
+#define SZ_64K (64 * 1024)
+
#define pgd_val(x) ((x).pgd)
#define pud_val(x) (pgd_val((x).pgd))
#define pmd_val(x) (pud_val((x).pud))
#define pte_val(x) ((x).pte)

#define PAGE_MASK (~(PAGESIZE() - 1))
-#define PGDIR_SHIFT ((PAGESHIFT() - 3) * ARM64_PGTABLE_LEVELS + 3)
+#define PGDIR_SHIFT ((PAGESHIFT() - 3) * pgtable_level + 3)
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
+#define PTRS_PER_PGD (1 << (va_bits - PGDIR_SHIFT))
#define PTRS_PER_PTE (1 << (PAGESHIFT() - 3))
#define PMD_SHIFT ((PAGESHIFT() - 3) * 2 + 3)
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -92,25 +99,10 @@ typedef struct {

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define PFN_DOWN(x) ((x) >> PAGESHIFT())
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGESHIFT())) * KERN_STRUCT_PAGE_SIZE, PUD_SIZE)
+#define VMEMMAP_SIZE ALIGN((1UL << (va_bits - PAGESHIFT())) * KERN_STRUCT_PAGE_SIZE, PUD_SIZE)
#define MODULES_END PAGE_OFFSET
#define MODULES_VADDR (MODULES_END - 0x4000000)

-static int pgtable_level;
-static int va_bits;
-
-int
-get_pgtable_level_arm64(void)
-{
- return pgtable_level;
-}
-
-int
-get_va_bits_arm64(void)
-{
- return va_bits;
-}
-
static pmd_t *
pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
{
@@ -121,32 +113,23 @@ pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
}
}

-#define PAGE_OFFSET_39 (0xffffffffffffffffUL << 39)
-#define PAGE_OFFSET_42 (0xffffffffffffffffUL << 42)
static int calculate_plat_config(void)
{
- unsigned long long stext;
-
- /* Currently we assume that there are only two possible
- * configuration supported by kernel.
- * 1) Page Table Level:2, Page Size 64K and VA Bits 42
- * 1) Page Table Level:3, Page Size 4K and VA Bits 39
- * Ideally, we should have some mechanism to decide these values
- * from kernel symbols, but we have limited symbols in vmcore,
- * and we can not do much. So until some one comes with a better
- * way, we use following.
- */
- stext = SYMBOL(_stext);
+ va_bits = NUMBER(VA_BITS);

- /* condition for minimum VA bits must be checked first and so on */
- if ((stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
- pgtable_level = 3;
- va_bits = 39;
- } else if ((stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
+ /* derive pgtable_level as per arch/arm64/Kconfig */
+ if ((PAGESIZE() == SZ_16K && va_bits == 36) ||
+ (PAGESIZE() == SZ_64K && va_bits == 42)) {
pgtable_level = 2;
- va_bits = 42;
+ } else if ((PAGESIZE() == SZ_64K && va_bits == 48) ||
+ (PAGESIZE() == SZ_4K && va_bits == 39) ||
+ (PAGESIZE() == SZ_16K && va_bits == 47)) {
+ pgtable_level = 3;
+ } else if ((PAGESIZE() != SZ_64K && va_bits == 48)) {
+ pgtable_level = 4;
} else {
- ERRMSG("Kernel Configuration not supported\n");
+ ERRMSG("PAGE SIZE %#lx and VA Bits %d not supported\n",
+ PAGESIZE(), va_bits);
return FALSE;
}

@@ -167,28 +150,9 @@ is_vtop_from_page_table_arm64(unsigned long vaddr)
int
get_phys_base_arm64(void)
{
- unsigned long phys_base = ULONG_MAX;
- unsigned long long phys_start;
- int i;
-
- /*
- * We resolve phys_base from PT_LOAD segments. LMA contains physical
- * address of the segment, and we use the lowest start as
- * phys_base.
- */
- for (i = 0; get_pt_load(i, &phys_start, NULL, NULL, NULL); i++) {
- if (phys_start < phys_base)
- phys_base = phys_start;
- }
-
- if (phys_base == ULONG_MAX) {
- ERRMSG("Can't determine phys_base\n");
- return FALSE;
- }
-
- info->phys_base = phys_base;
+ info->phys_base = NUMBER(PHYS_OFFSET);

- DEBUG_MSG("phys_base : %lx\n", phys_base);
+ DEBUG_MSG("phys_base : %lx\n", info->phys_base);

return TRUE;
}
@@ -203,8 +167,8 @@ get_machdep_info_arm64(void)

info->max_physmem_bits = PHYS_MASK_SHIFT;
info->section_size_bits = SECTIONS_SIZE_BITS;
- info->page_offset = 0xffffffffffffffffUL << (VA_BITS - 1);
- info->vmalloc_start = 0xffffffffffffffffUL << VA_BITS;
+ info->page_offset = 0xffffffffffffffffUL << (va_bits - 1);
+ info->vmalloc_start = 0xffffffffffffffffUL << va_bits;
info->vmalloc_end = PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - 0x10000;
info->vmemmap_start = VMALLOC_END + 0x10000;
info->vmemmap_end = VMEMMAP_START + VMEMMAP_SIZE;
diff --git a/makedumpfile.c b/makedumpfile.c
index e248db876858..df413f066348 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -2263,6 +2263,10 @@ write_vmcoreinfo_data(void)
WRITE_NUMBER("KERNEL_IMAGE_SIZE", KERNEL_IMAGE_SIZE);

WRITE_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);
+#ifdef __aarch64__
+ WRITE_NUMBER("VA_BITS", VA_BITS);
+ WRITE_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+#endif

/*
* write the source file of 1st kernel
@@ -2645,6 +2649,10 @@ read_vmcoreinfo(void)

READ_NUMBER("PAGE_BUDDY_MAPCOUNT_VALUE", PAGE_BUDDY_MAPCOUNT_VALUE);
READ_NUMBER("KERNEL_IMAGE_SIZE", KERNEL_IMAGE_SIZE);
+#ifdef __aarch64__
+ READ_NUMBER("VA_BITS", VA_BITS);
+ READ_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+#endif

READ_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);

diff --git a/makedumpfile.h b/makedumpfile.h
index c363c547b448..1a119ac949b6 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -516,9 +516,6 @@ do { \
#define PMASK (0x7ffffffffffff000UL)

#ifdef __aarch64__
-int get_va_bits_arm64(void);
-#define ARM64_PGTABLE_LEVELS get_pgtable_level_arm64()
-#define VA_BITS get_va_bits_arm64()
#define KVBASE VMALLOC_START
#endif /* aarch64 */

@@ -1736,6 +1733,10 @@ struct number_table {
long SECTION_SIZE_BITS;
long MAX_PHYSMEM_BITS;
long HUGETLB_PAGE_DTOR;
+#ifdef __aarch64__
+ long VA_BITS;
+ unsigned long PHYS_OFFSET;
+#endif
};

struct srcfile_table {
--
2.7.4
Pratyush Anand
2016-10-25 07:23:00 UTC
Permalink
Kernel mappings may change for vmalloc, vmemmap and module addresses.
Moreover, the proposed kexec arm64 kernel patches do not provide sufficient
information in vmcore to separate out these areas.

Makedumpfile should be able to read entries from swapper page tables
minimally. Virtual address of swapper_pg_dir symbol will always be linearly
translatable. So, using __pa() macro we get physical address of
swapper_pg_dir and then we find physical address of any kernel virtual
address by reading corresponding entries in page table.

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 75 +++++++++++++---------------------------------------------
makedumpfile.h | 3 ++-
2 files changed, 19 insertions(+), 59 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index 6f61b4c5497f..da8f43c62380 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -53,8 +53,6 @@ static int va_bits;

#define PAGE_MASK (~(PAGESIZE() - 1))
#define PGDIR_SHIFT ((PAGESHIFT() - 3) * pgtable_level + 3)
-#define PUD_SHIFT PGDIR_SHIFT
-#define PUD_SIZE (1UL << PUD_SHIFT)
#define PTRS_PER_PGD (1 << (va_bits - PGDIR_SHIFT))
#define PTRS_PER_PTE (1 << (PAGESHIFT() - 3))
#define PMD_SHIFT ((PAGESHIFT() - 3) * 2 + 3)
@@ -77,31 +75,19 @@ static int va_bits;
#define PMD_TYPE_SECT 1
#define PMD_TYPE_TABLE 3

-#define __va(paddr) ((paddr) - info->phys_base + PAGE_OFFSET)
#define __pa(vaddr) ((vaddr) - PAGE_OFFSET + info->phys_base)

#define pgd_index(vaddr) (((vaddr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgdir, vaddr) ((pgd_t *)(pgdir) + pgd_index(vaddr))

#define pte_index(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
-#define pmd_page_vaddr(pmd) (__va(pmd_val(pmd) & PHYS_MASK & (int32_t)PAGE_MASK))
-#define pte_offset(dir, vaddr) ((pte_t*)pmd_page_vaddr((*dir)) + pte_index(vaddr))
+#define pmd_page_paddr(pmd) (pmd_val(pmd) & PHYS_MASK & (int32_t)PAGE_MASK)
+#define pte_offset(dir, vaddr) ((pte_t*)pmd_page_paddr((*dir)) + pte_index(vaddr))

#define pmd_index(vaddr) (((vaddr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pud_page_vaddr(pud) (__va(pud_val(pud) & PHYS_MASK & (int32_t)PAGE_MASK))
+#define pud_page_paddr(pud) (pud_val(pud) & PHYS_MASK & (int32_t)PAGE_MASK)
#define pmd_offset_pgtbl_lvl_2(pud, vaddr) ((pmd_t *)pud)
-#define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_vaddr((*pud)) + pmd_index(vaddr))
-
-/* kernel struct page size can be kernel version dependent, currently
- * keep it constant.
- */
-#define KERN_STRUCT_PAGE_SIZE get_structure_size("page", DWARF_INFO_GET_STRUCT_SIZE)
-
-#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
-#define PFN_DOWN(x) ((x) >> PAGESHIFT())
-#define VMEMMAP_SIZE ALIGN((1UL << (va_bits - PAGESHIFT())) * KERN_STRUCT_PAGE_SIZE, PUD_SIZE)
-#define MODULES_END PAGE_OFFSET
-#define MODULES_VADDR (MODULES_END - 0x4000000)
+#define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_paddr((*pud)) + pmd_index(vaddr))

static pmd_t *
pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
@@ -136,15 +122,10 @@ static int calculate_plat_config(void)
return TRUE;
}

-static int
-is_vtop_from_page_table_arm64(unsigned long vaddr)
+unsigned long
+get_kvbase_arm64(void)
{
- /* If virtual address lies in vmalloc, vmemmap or module space
- * region then, get the physical address from page table.
- */
- return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END)
- || (vaddr >= VMEMMAP_START && vaddr <= VMEMMAP_END)
- || (vaddr >= MODULES_VADDR && vaddr <= MODULES_END));
+ return (0xffffffffffffffffUL << va_bits);
}

int
@@ -168,20 +149,10 @@ get_machdep_info_arm64(void)
info->max_physmem_bits = PHYS_MASK_SHIFT;
info->section_size_bits = SECTIONS_SIZE_BITS;
info->page_offset = 0xffffffffffffffffUL << (va_bits - 1);
- info->vmalloc_start = 0xffffffffffffffffUL << va_bits;
- info->vmalloc_end = PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - 0x10000;
- info->vmemmap_start = VMALLOC_END + 0x10000;
- info->vmemmap_end = VMEMMAP_START + VMEMMAP_SIZE;

DEBUG_MSG("max_physmem_bits : %lx\n", info->max_physmem_bits);
DEBUG_MSG("section_size_bits: %lx\n", info->section_size_bits);
DEBUG_MSG("page_offset : %lx\n", info->page_offset);
- DEBUG_MSG("vmalloc_start : %lx\n", info->vmalloc_start);
- DEBUG_MSG("vmalloc_end : %lx\n", info->vmalloc_end);
- DEBUG_MSG("vmemmap_start : %lx\n", info->vmemmap_start);
- DEBUG_MSG("vmemmap_end : %lx\n", info->vmemmap_end);
- DEBUG_MSG("modules_start : %lx\n", MODULES_VADDR);
- DEBUG_MSG("modules_end : %lx\n", MODULES_END);

return TRUE;
}
@@ -211,17 +182,18 @@ get_versiondep_info_arm64(void)
}

/*
- * vtop_arm64() - translate arbitrary virtual address to physical
+ * vaddr_to_paddr_arm64() - translate arbitrary virtual address to physical
* @vaddr: virtual address to translate
*
* Function translates @vaddr into physical address using page tables. This
* address can be any virtual address. Returns physical address of the
* corresponding virtual address or %NOT_PADDR when there is no translation.
*/
-static unsigned long long
-vtop_arm64(unsigned long vaddr)
+unsigned long long
+vaddr_to_paddr_arm64(unsigned long vaddr)
{
unsigned long long paddr = NOT_PADDR;
+ unsigned long long swapper_phys;
pgd_t *pgda, pgdv;
pud_t *puda, pudv;
pmd_t *pmda, pmdv;
@@ -232,8 +204,10 @@ vtop_arm64(unsigned long vaddr)
return NOT_PADDR;
}

- pgda = pgd_offset(SYMBOL(swapper_pg_dir), vaddr);
- if (!readmem(VADDR, (unsigned long long)pgda, &pgdv, sizeof(pgdv))) {
+ swapper_phys = __pa(SYMBOL(swapper_pg_dir));
+
+ pgda = pgd_offset(swapper_phys, vaddr);
+ if (!readmem(PADDR, (unsigned long long)pgda, &pgdv, sizeof(pgdv))) {
ERRMSG("Can't read pgd\n");
return NOT_PADDR;
}
@@ -242,7 +216,7 @@ vtop_arm64(unsigned long vaddr)
puda = (pud_t *)pgda;

pmda = pmd_offset(puda, &pudv, vaddr);
- if (!readmem(VADDR, (unsigned long long)pmda, &pmdv, sizeof(pmdv))) {
+ if (!readmem(PADDR, (unsigned long long)pmda, &pmdv, sizeof(pmdv))) {
ERRMSG("Can't read pmd\n");
return NOT_PADDR;
}
@@ -251,7 +225,7 @@ vtop_arm64(unsigned long vaddr)
case PMD_TYPE_TABLE:
ptea = pte_offset(&pmdv, vaddr);
/* 64k page */
- if (!readmem(VADDR, (unsigned long long)ptea, &ptev, sizeof(ptev))) {
+ if (!readmem(PADDR, (unsigned long long)ptea, &ptev, sizeof(ptev))) {
ERRMSG("Can't read pte\n");
return NOT_PADDR;
}
@@ -275,19 +249,4 @@ vtop_arm64(unsigned long vaddr)
return paddr;
}

-unsigned long long
-vaddr_to_paddr_arm64(unsigned long vaddr)
-{
- /*
- * use translation tables when a) user has explicitly requested us to
- * perform translation for a given address. b) virtual address lies in
- * vmalloc, vmemmap or modules memory region. Otherwise we assume that
- * the translation is done within the kernel direct mapped region.
- */
- if ((info->vaddr_for_vtop == vaddr) ||
- is_vtop_from_page_table_arm64(vaddr))
- return vtop_arm64(vaddr);
-
- return __pa(vaddr);
-}
#endif /* __aarch64__ */
diff --git a/makedumpfile.h b/makedumpfile.h
index 1a119ac949b6..8ddc2c43cc29 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -516,7 +516,8 @@ do { \
#define PMASK (0x7ffffffffffff000UL)

#ifdef __aarch64__
-#define KVBASE VMALLOC_START
+unsigned long get_kvbase_arm64(void);
+#define KVBASE get_kvbase_arm64()
#endif /* aarch64 */

#ifdef __arm__
--
2.7.4
Pratyush Anand
2016-10-25 07:23:01 UTC
Permalink
From: Azriel Samson <***@codeaurora.org>

Add PUD translation for 4 level page tables.

Signed-off-by: Mansi Patel <***@codeaurora.org>
Signed-off-by: Azriel Samson <***@codeaurora.org>
Signed-off-by: Sameer Goel <***@codeaurora.org>
Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 30 ++++++++++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index da8f43c62380..df58e92536e2 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -54,7 +54,9 @@ static int va_bits;
#define PAGE_MASK (~(PAGESIZE() - 1))
#define PGDIR_SHIFT ((PAGESHIFT() - 3) * pgtable_level + 3)
#define PTRS_PER_PGD (1 << (va_bits - PGDIR_SHIFT))
+#define PUD_SHIFT get_pud_shift_arm64()
#define PTRS_PER_PTE (1 << (PAGESHIFT() - 3))
+#define PTRS_PER_PUD PTRS_PER_PTE
#define PMD_SHIFT ((PAGESHIFT() - 3) * 2 + 3)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
@@ -89,6 +91,18 @@ static int va_bits;
#define pmd_offset_pgtbl_lvl_2(pud, vaddr) ((pmd_t *)pud)
#define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_paddr((*pud)) + pmd_index(vaddr))

+#define pud_index(vaddr) (((vaddr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pgd_page_paddr(pgd) (pgd_val(pgd) & PHYS_MASK & (int32_t)PAGE_MASK)
+
+static int
+get_pud_shift_arm64(void)
+{
+ if (pgtable_level == 4)
+ return ((PAGESHIFT() - 3) * 3 + 3);
+ else
+ return PGDIR_SHIFT;
+}
+
static pmd_t *
pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
{
@@ -99,6 +113,15 @@ pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
}
}

+static pud_t *
+pud_offset(pgd_t *pgda, pgd_t *pgdv, unsigned long vaddr)
+{
+ if (pgtable_level == 4)
+ return ((pud_t *)pgd_page_paddr((*pgdv)) + pud_index(vaddr));
+ else
+ return (pud_t *)(pgda);
+}
+
static int calculate_plat_config(void)
{
va_bits = NUMBER(VA_BITS);
@@ -212,8 +235,11 @@ vaddr_to_paddr_arm64(unsigned long vaddr)
return NOT_PADDR;
}

- pudv.pgd = pgdv;
- puda = (pud_t *)pgda;
+ puda = pud_offset(pgda, &pgdv, vaddr);
+ if (!readmem(PADDR, (unsigned long long)puda, &pudv, sizeof(pudv))) {
+ ERRMSG("Can't read pud\n");
+ return NOT_PADDR;
+ }

pmda = pmd_offset(puda, &pudv, vaddr);
if (!readmem(PADDR, (unsigned long long)pmda, &pmdv, sizeof(pmdv))) {
--
2.7.4
Pratyush Anand
2016-10-25 07:23:02 UTC
Permalink
The arm64 memory layout has changed since the 4.6 kernel because of the KASLR
support patches.

kimage_voffset is needed to calculate "virtual to physical".

Signed-off-by: Pratyush Anand <***@redhat.com>
---
arch/arm64.c | 16 ++++++++++++++--
makedumpfile.c | 2 ++
makedumpfile.h | 1 +
3 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/arch/arm64.c b/arch/arm64.c
index df58e92536e2..958f57f8e203 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -41,10 +41,12 @@ typedef struct {

static int pgtable_level;
static int va_bits;
+static unsigned long kimage_voffset;

#define SZ_4K (4 * 1024)
#define SZ_16K (16 * 1024)
#define SZ_64K (64 * 1024)
+#define SZ_128M (128 * 1024 * 1024)

#define pgd_val(x) ((x).pgd)
#define pud_val(x) (pgd_val((x).pgd))
@@ -77,8 +79,6 @@ static int va_bits;
#define PMD_TYPE_SECT 1
#define PMD_TYPE_TABLE 3

-#define __pa(vaddr) ((vaddr) - PAGE_OFFSET + info->phys_base)
-
#define pgd_index(vaddr) (((vaddr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgdir, vaddr) ((pgd_t *)(pgdir) + pgd_index(vaddr))

@@ -94,6 +94,16 @@ static int va_bits;
#define pud_index(vaddr) (((vaddr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pgd_page_paddr(pgd) (pgd_val(pgd) & PHYS_MASK & (int32_t)PAGE_MASK)

+static unsigned long long
+__pa(unsigned long vaddr)
+{
+ if (kimage_voffset == NOT_FOUND_NUMBER ||
+ (vaddr >= PAGE_OFFSET))
+ return (vaddr - PAGE_OFFSET + info->phys_base);
+ else
+ return (vaddr - kimage_voffset);
+}
+
static int
get_pud_shift_arm64(void)
{
@@ -169,10 +179,12 @@ get_machdep_info_arm64(void)
return FALSE;
}

+ kimage_voffset = NUMBER(kimage_voffset);
info->max_physmem_bits = PHYS_MASK_SHIFT;
info->section_size_bits = SECTIONS_SIZE_BITS;
info->page_offset = 0xffffffffffffffffUL << (va_bits - 1);

+ DEBUG_MSG("kimage_voffset : %lx\n", kimage_voffset);
DEBUG_MSG("max_physmem_bits : %lx\n", info->max_physmem_bits);
DEBUG_MSG("section_size_bits: %lx\n", info->section_size_bits);
DEBUG_MSG("page_offset : %lx\n", info->page_offset);
diff --git a/makedumpfile.c b/makedumpfile.c
index df413f066348..1fa9a63359c4 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -2266,6 +2266,7 @@ write_vmcoreinfo_data(void)
#ifdef __aarch64__
WRITE_NUMBER("VA_BITS", VA_BITS);
WRITE_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+ WRITE_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
#endif

/*
@@ -2652,6 +2653,7 @@ read_vmcoreinfo(void)
#ifdef __aarch64__
READ_NUMBER("VA_BITS", VA_BITS);
READ_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+ READ_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
#endif

READ_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);
diff --git a/makedumpfile.h b/makedumpfile.h
index 8ddc2c43cc29..f0154226bcb8 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -1737,6 +1737,7 @@ struct number_table {
#ifdef __aarch64__
long VA_BITS;
unsigned long PHYS_OFFSET;
+ unsigned long kimage_voffset;
#endif
};
--
2.7.4
Pratyush Anand
2016-10-25 10:34:50 UTC
Permalink
Hi Sameer,

Although you had already tested my tree on your platform in past.
However, it would be nice if you can test these posted versions of
patches just for sanity and provide your tested-by.

~Pratyush
These patches were lying in my tree for quite some time now. VMCOREINFO
numbers/symbols used in these patches have been made part of kdump kernel
patches for last few versions including v26. So, there seems no contention
on embedding VA_BITS, kimage_voffset and PHYS_OFFSET into vmcore.
These patches clean up a lot of arm64 code and also immunize it against many
kernel changes. Additionally, it adds 4-level 4K page support and support for
KASLR enabled kernel.
arm64: Add support for 4level 4K page translations table
arm64: cleanup code, comment, blank space, blank lines etc
read_vmcoreinfo_long: Allow to read hex values as well
Introduce read_vmcoreinfo_ulong()
arm64: use already available PAGESIZE() and PAGESHIFT() macros
arm64: fix page_offset definition
arm64: fix re-filtering
arm64: use value of VA_BITS and PHYS_OFFSET embedded into vmcore
arm64: immunize identity mapped address finding w.r.t. kernel changes
arm64: fix memory layout as per changes in v4.6 kernel
arch/arm64.c | 245 ++++++++++++++++++++++-----------------------------------
makedumpfile.c | 46 +++++++++++
makedumpfile.h | 28 +++++--
3 files changed, 162 insertions(+), 157 deletions(-)
Atsushi Kumagai
2016-11-01 09:11:32 UTC
Permalink
Hello Pratyush,
These patches were lying in my tree for quite some time now. VMCOREINFO
numbers/symbols used in these patches have been made part of kdump kernel
patches for last few versions including v26. So, there seems no contention
on embedding VA_BITS, kimage_voffset and PHYS_OFFSET into vmcore.
These patches cleans up a lot arm64 code and also immunize it with many
kernel changes. Additionally,it adds 4 level 4K page support and support for
KASLR enabled kernel.
arm64: Add support for 4level 4K page translations table
arm64: cleanup code, comment, blank space, blank lines etc
read_vmcoreinfo_long: Allow to read hex values as well
Introduce read_vmcoreinfo_ulong()
arm64: use already available PAGESIZE() and PAGESHIFT() macros
arm64: fix page_offset definition
arm64: fix re-filtering
arm64: use value of VA_BITS and PHYS_OFFSET embedded into vmcore
arm64: immunize identity mapped address finding w.r.t. kernel changes
arm64: fix memory layout as per changes in v4.6 kernel
arch/arm64.c | 245 ++++++++++++++++++++++-----------------------------------
makedumpfile.c | 46 +++++++++++
makedumpfile.h | 28 +++++--
3 files changed, 162 insertions(+), 157 deletions(-)
Thanks always for your work for arm64, I've reviewed this patch set.
I'll merge them into v1.6.1.

Regards,
Atsushi Kumagai
Pratyush Anand
2016-11-15 06:34:14 UTC
Permalink
Hi Atsushi,

There would be a conflict because of following patch while applying
these patches. Other than that I also see an issue with --config
option. So I will fix that as well and repost the series soon.

0068010b9b83 [PATCH v2 2/2] Clean up unused KERNEL_IMAGE_SIZE

~Pratyush

On Tue, Nov 1, 2016 at 2:41 PM, Atsushi Kumagai
Post by Atsushi Kumagai
Hello Pratyush,
These patches were lying in my tree for quite some time now. VMCOREINFO
numbers/symbols used in these patches have been made part of kdump kernel
patches for last few versions including v26. So, there seems no contention
on embedding VA_BITS, kimage_voffset and PHYS_OFFSET into vmcore.
These patches cleans up a lot arm64 code and also immunize it with many
kernel changes. Additionally,it adds 4 level 4K page support and support for
KASLR enabled kernel.
arm64: Add support for 4level 4K page translations table
arm64: cleanup code, comment, blank space, blank lines etc
read_vmcoreinfo_long: Allow to read hex values as well
Introduce read_vmcoreinfo_ulong()
arm64: use already available PAGESIZE() and PAGESHIFT() macros
arm64: fix page_offset definition
arm64: fix re-filtering
arm64: use value of VA_BITS and PHYS_OFFSET embedded into vmcore
arm64: immunize identity mapped address finding w.r.t. kernel changes
arm64: fix memory layout as per changes in v4.6 kernel
arch/arm64.c | 245 ++++++++++++++++++++++-----------------------------------
makedumpfile.c | 46 +++++++++++
makedumpfile.h | 28 +++++--
3 files changed, 162 insertions(+), 157 deletions(-)
Thanks always for your work for arm64, I've reviewed this patch set.
I'll merge them into v1.6.1.
Regards,
Atsushi Kumagai
Pratyush Anand
2016-11-15 11:18:22 UTC
Permalink
Post by Pratyush Anand
Hi Atsushi,
There would be a conflict because of following patch while applying
these patches. Other than that I also see a an issue with --config
option. So I will fix that as well and repost the series soon.
Sorry for the noise. That was a bad compilation of the code.

Anyway, I resolved the conflict caused by the below patch in the upstream
devel branch and pushed the patches to my git tree:
https://github.com/pratyushanand/makedumpfile.git : arm64_devel

There are no changes other than conflict resolution w.r.t. this patch series.

~Pratyush
Post by Pratyush Anand
0068010b9b83 [PATCH v2 2/2] Clean up unused KERNEL_IMAGE_SIZE
Atsushi Kumagai
2016-11-16 05:44:41 UTC
Permalink
Hello Pratyush,
Post by Pratyush Anand
Post by Pratyush Anand
Hi Atsushi,
There would be a conflict because of following patch while applying
these patches. Other than that I also see a an issue with --config
option. So I will fix that as well and repost the series soon.
Sorry, for the noise.That was a bad compilation of the code.
Anyway, I resolved the conflict because of below patch in upstream
https://github.com/pratyushanand/makedumpfile.git : arm64_devel
There is no changes other than conflict resolution w.r.t. this patch series.
~Pratyush
Post by Pratyush Anand
0068010b9b83 [PATCH v2 2/2] Clean up unused KERNEL_IMAGE_SIZE
I'm sorry ! I misunderstood that I already pushed this series
in the devel branch. Should I wait for v2 series where the
conflict is resolved ?


Thanks,
Atsushi Kumagai
Pratyush Anand
2016-11-16 08:33:02 UTC
Permalink
On Wed, Nov 16, 2016 at 11:14 AM, Atsushi Kumagai
Post by Atsushi Kumagai
Hello Pratyush,
Post by Pratyush Anand
Post by Pratyush Anand
Hi Atsushi,
There would be a conflict because of following patch while applying
these patches. Other than that I also see a an issue with --config
option. So I will fix that as well and repost the series soon.
Sorry, for the noise.That was a bad compilation of the code.
Anyway, I resolved the conflict because of below patch in upstream
https://github.com/pratyushanand/makedumpfile.git : arm64_devel
There is no changes other than conflict resolution w.r.t. this patch series.
~Pratyush
Post by Pratyush Anand
0068010b9b83 [PATCH v2 2/2] Clean up unused KERNEL_IMAGE_SIZE
I'm sorry ! I misunderstood that I already pushed this series
in the devel branch. Should I wait for v2 series where the
conflict is resolved ?
As you wish. I can send v2 (there is no difference other than conflict
resolution for rebasing on top of current devel, conflict is minor),
or you can pick them from
https://github.com/pratyushanand/makedumpfile.git : arm64_devel.

I am OK with whatever is convenient for you.

Thanks
Pratyush
Atsushi Kumagai
2016-11-16 08:40:03 UTC
Permalink
Post by Pratyush Anand
Post by Atsushi Kumagai
Post by Pratyush Anand
Post by Pratyush Anand
Hi Atsushi,
There would be a conflict because of following patch while applying
these patches. Other than that I also see a an issue with --config
option. So I will fix that as well and repost the series soon.
Sorry, for the noise.That was a bad compilation of the code.
Anyway, I resolved the conflict because of below patch in upstream
https://github.com/pratyushanand/makedumpfile.git : arm64_devel
There is no changes other than conflict resolution w.r.t. this patch series.
~Pratyush
Post by Pratyush Anand
0068010b9b83 [PATCH v2 2/2] Clean up unused KERNEL_IMAGE_SIZE
I'm sorry ! I misunderstood that I already pushed this series
in the devel branch. Should I wait for v2 series where the
conflict is resolved ?
As you wish. I can send v2 (there is no difference other than conflict
resolution for rebasing on top of current devel, conflict is minor),
or you can pick them from
https://github.com/pratyushanand/makedumpfile.git : arm64_devel.
Sure, I'll pick them (from 60352bd to 3b5faac, right?) up from
your devel branch, thanks.

Regards,
Atsushi Kumagai
Pratyush Anand
2016-11-17 04:36:38 UTC
Permalink
Post by Atsushi Kumagai
Post by Pratyush Anand
As you wish. I can send v2 (there is no difference other than conflict
Post by Pratyush Anand
resolution for rebasing on top of current devel, conflict is minor),
or you can pick them from
https://github.com/pratyushanand/makedumpfile.git : arm64_devel.
Sure, I'll pick them (from 60352bd to 3b5faac, right?) up from
your devel branch, thanks.
Yes, Correct.

Thanks

~Pratyush
Goel, Sameer
2016-12-08 00:18:27 UTC
Permalink
Works great for 4l 4k pages.
These patches were lying in my tree for quite some time now. VMCOREINFO
numbers/symbols used in these patches have been made part of kdump kernel
patches for last few versions including v26. So, there seems no contention
on embedding VA_BITS, kimage_voffset and PHYS_OFFSET into vmcore.
These patches cleans up a lot arm64 code and also immunize it with many
kernel changes. Additionally,it adds 4 level 4K page support and support for
KASLR enabled kernel.
arm64: Add support for 4level 4K page translations table
arm64: cleanup code, comment, blank space, blank lines etc
read_vmcoreinfo_long: Allow to read hex values as well
Introduce read_vmcoreinfo_ulong()
arm64: use already available PAGESIZE() and PAGESHIFT() macros
arm64: fix page_offset definition
arm64: fix re-filtering
arm64: use value of VA_BITS and PHYS_OFFSET embedded into vmcore
arm64: immunize identity mapped address finding w.r.t. kernel changes
arm64: fix memory layout as per changes in v4.6 kernel
arch/arm64.c | 245 ++++++++++++++++++++++-----------------------------------
makedumpfile.c | 46 +++++++++++
makedumpfile.h | 28 +++++--
3 files changed, 162 insertions(+), 157 deletions(-)
--
Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project.
Loading...