Files
linux-legacy/fs/proc/kcore.c
KAMEZAWA Hiroyuki 1d831f6270 /proc/kcore: work around a BUG()
Not upstream, due to the more extensive /proc/kcore changes merged in 2.6.32


Works around a BUG() which is triggered when the kernel accesses holes in
vmalloc regions.

BUG: unable to handle kernel paging request at fa54c000
IP: [<c04f687a>] read_kcore+0x260/0x31a
*pde = 3540b067 *pte = 00000000
Oops: 0000 [#1] SMP
last sysfs file: /sys/devices/pci0000:00/0000:00:1c.2/0000:03:00.0/ieee80211/phy0/rfkill0/state
Modules linked in: fuse sco bridge stp llc bnep l2cap bluetooth sunrpc nf_conntrack_ftp ip6t_REJECT nf_conntrack_ipv6 ip6table_filter ip6_tables ipv6 cpufreq_ondemand acpi_cpufreq dm_multipath uinput usb_storage arc4 ecb snd_hda_codec_realtek snd_hda_intel ath5k snd_hda_codec snd_hwdep iTCO_wdt snd_pcm iTCO_vendor_support pcspkr i2c_i801 mac80211 joydev snd_timer serio_raw r8169 snd soundcore mii snd_page_alloc ath cfg80211 ata_generic i915 drm i2c_algo_bit i2c_core video output [last unloaded: scsi_wait_scan]
Sep  4 12:45:16 tuxedu kernel: Pid: 2266, comm: cat Not tainted (2.6.31-rc8 #2) Joybook Lite U101
EIP: 0060:[<c04f687a>] EFLAGS: 00010286 CPU: 0
EIP is at read_kcore+0x260/0x31a
EAX: f5e5ea00 EBX: fa54d000 ECX: 00000400 EDX: 00001000
ESI: fa54c000 EDI: f44ad000 EBP: e4533f4c ESP: e4533f24
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process cat (pid: 2266, ti=e4532000 task=f09d19a0 task.ti=e4532000)
Stack:
00005000 00000000 f44ad000 09d9c000 00003000 fa54c000 00001000 f6d16f60
 e4520b80 fffffffb e4533f70 c04ef8eb e4533f98 00008000 09d97000 c04f661a
 e4520b80 09d97000 c04ef88c e4533f8c c04ba531 e4533f98 c04c0930 e4520b80
Call Trace:
[<c04ef8eb>] ? proc_reg_read+0x5f/0x73
[<c04f661a>] ? read_kcore+0x0/0x31a
[<c04ef88c>] ? proc_reg_read+0x0/0x73
[<c04ba531>] ? vfs_read+0x82/0xe1
[<c04c0930>] ? path_put+0x1a/0x1d
[<c04ba62e>] ? sys_read+0x40/0x62
[<c0403298>] ? sysenter_do_call+0x12/0x2d
Code: 39 f3 89 ca 0f 43 f3 89 fb 29 f2 29 f3 39 cf 0f 46 d3 29 55 dc 8d 1c 32 f6 40 0c 01 75 18 89 d1 89 f7 c1 e9 02 2b 7d ec 03 7d e0 <f3> a5 89 d1 83 e1 03 74 02 f3 a4 8b 00 83 7d dc 00 74 04 85 c0
EIP: [<c04f687a>] read_kcore+0x260/0x31a SS:ESP 0068:e4533f24
CR2: 00000000fa54c000


To access vmalloc areas, which may contain memory holes, copy_from_user() is
useful: a fault on a hole is fixed up via the exception table instead of
oopsing.  With this change,

 # cat /proc/kcore > /dev/null

no longer panics.

This is a minimal fix, suitable for 2.6.30.x and 2.6.31.  More extensive
/proc/kcore changes are planned for 2.6.32.
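
The heart of the workaround is the copy in read_kcore()'s vmalloc path.  A
minimal before/after sketch (illustrative only; the "before" line is an
assumption about the pre-fix code, the "after" lines mirror the code below):

	/* Before (assumed): a direct read of an unmapped vmalloc page oopses */
	memcpy(elf_buf + (vmstart - start), (char *)vmstart, vmsize);

	/*
	 * After: the uaccess helper has exception-table fixups, so a fault
	 * on a hole just ends the copy early; elf_buf is kzalloc()ed, so
	 * the hole reads back as zeroes.  The return value (bytes not
	 * copied) is assigned only to silence warnings.
	 */
	vmsize = __copy_from_user_inatomic(elf_buf + (vmstart - start),
					   (char *)vmstart, vmsize);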

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Nick Craig-Wood <nick@craig-wood.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Reported-by: <kbowa@tuxedu.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2009-10-05 09:32:54 -07:00


/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#define CORE_STR "CORE"
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif
static struct proc_dir_entry *proc_root_kcore;
static int open_kcore(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);
static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
};
#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
static struct kcore_list *kclist;
static DEFINE_RWLOCK(kclist_lock);
void
kclist_add(struct kcore_list *new, void *addr, size_t size)
{
	new->addr = (unsigned long)addr;
	new->size = size;

	write_lock(&kclist_lock);
	new->next = kclist;
	kclist = new;
	write_unlock(&kclist_lock);
}
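
/*
 * Size of the whole /proc/kcore image: the highest offset covered by a
 * registered kcore_list entry, plus room for the ELF headers and notes.
 */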
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	for (m=kclist; m; m=m->next) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen = sizeof(struct elfhdr) +
		(*nphdr + 2)*sizeof(struct elf_phdr) +
		3 * ((sizeof(struct elf_note)) +
		     roundup(sizeof(CORE_STR), 4)) +
		roundup(sizeof(struct elf_prstatus), 4) +
		roundup(sizeof(struct elf_prpsinfo), 4) +
		roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}
/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
int sz;
sz = sizeof(struct elf_note);
sz += roundup((strlen(en->name) + 1), 4);
sz += roundup(en->datasz, 4);
return sz;
} /* end notesize() */
/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */
/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = nphdr;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type = PT_NOTE;
	nhdr->p_offset = 0;
	nhdr->p_vaddr = 0;
	nhdr->p_paddr = 0;
	nhdr->p_filesz = 0;
	nhdr->p_memsz = 0;
	nhdr->p_flags = 0;
	nhdr->p_align = 0;

	/* setup ELF PT_LOAD program header for every area */
	for (m=kclist; m; m=m->next) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr = (size_t)m->addr;
		phdr->p_paddr = 0;
		phdr->p_filesz = phdr->p_memsz = m->size;
		phdr->p_align = PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset = offset;

	/* set up the process status */
	notes[0].name = CORE_STR;
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz = notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name = CORE_STR;
	notes[1].type = NT_PRPSINFO;
	notes[1].datasz = sizeof(struct elf_prpsinfo);
	notes[1].data = &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state = 0;
	prpsinfo.pr_sname = 'R';
	prpsinfo.pr_zomb = 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz += notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name = CORE_STR;
	notes[2].type = NT_TASKSTRUCT;
	notes[2].datasz = sizeof(struct task_struct);
	notes[2].data = current;

	nhdr->p_filesz += notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);
} /* end elf_kcore_store_hdr() */
/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
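		/* GFP_ATOMIC: kclist_lock (a spinning rwlock) is still held */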
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		for (m=kclist; m; m=m->next) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (m == NULL) {
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (is_vmalloc_addr((void *)start)) {
			char * elf_buf;
			struct vm_struct *m;
			unsigned long curstart = start;
			unsigned long cursize = tsz;

			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;

			read_lock(&vmlist_lock);
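			/*
			 * Clip the chunk [curstart, curstart + cursize)
			 * against each mapped area; vm_struct.size includes
			 * the trailing guard page, hence the "- PAGE_SIZE".
			 */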
			for (m=vmlist; m && cursize; m=m->next) {
				unsigned long vmstart;
				unsigned long vmsize;
				unsigned long msize = m->size - PAGE_SIZE;

				if (((unsigned long)m->addr + msize) < curstart)
					continue;
				if ((unsigned long)m->addr > (curstart + cursize))
					break;
				vmstart = (curstart < (unsigned long)m->addr ?
					   (unsigned long)m->addr : curstart);
				if (((unsigned long)m->addr + msize) >
				    (curstart + cursize))
					vmsize = curstart + cursize - vmstart;
				else
					vmsize = (unsigned long)m->addr +
						 msize - vmstart;
				curstart = vmstart + vmsize;
				cursize -= vmsize;
				/* don't dump ioremap'd stuff! (TA) */
				if (m->flags & VM_IOREMAP)
					continue;
				/*
				 * We may hit memory holes, which are then
				 * handled via the exception table.  The
				 * return value is checked only to avoid
				 * warnings.
				 */
				vmsize = __copy_from_user_inatomic(
					elf_buf + (vmstart - start),
					(char *)vmstart, vmsize);
			}
			read_unlock(&vmlist_lock);
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distinguish between a fault on
				 * the source and a fault on the destination.
				 * When this happens we clear the buffer too
				 * and hope it will trigger the EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n, n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
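	/* provisional size; read_kcore() recomputes it on every read */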
	if (proc_root_kcore)
		proc_root_kcore->size =
			(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
	return 0;
}
module_init(proc_kcore_init);