Reviewed and applied spl-01-rm-gpl-symbol-set_cpus_allowed.patch
from Ricardo, which removes a dependency on the GPL-only symbol
set_cpus_allowed(). Using that symbol is simpler, but in the name of
portability we adopt a spinlock-based solution here to remove the
dependency.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@160 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
commit f6c81c5ea7 (parent d50bd9e221)
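The idea behind the change, in brief: rather than using set_cpus_allowed() to migrate the dumping thread onto each CPU in turn, every per-CPU trace buffer now carries its own spinlock, and the dumper walks all CPUs' buffers while holding the corresponding lock. Below is a minimal userspace sketch of that pattern; it is illustrative only, and the names (NCPU, cpu_buf, log_on_cpu, collect_all) are hypothetical, not part of the SPL code.

/*
 * Minimal userspace sketch (hypothetical names) of the pattern adopted
 * here: each per-CPU buffer owns a lock, so a dumper can drain every
 * buffer without migrating itself from CPU to CPU.
 */
#include <pthread.h>
#include <stdio.h>

#define NCPU 4                          /* stand-in for num_possible_cpus() */

struct cpu_buf {
        pthread_spinlock_t lock;        /* plays the role of tcd_lock */
        int pending;                    /* plays the role of tcd_cur_pages */
};

static struct cpu_buf bufs[NCPU];

/* Producer path: a writer on "cpu" logs under that CPU's own lock. */
static void log_on_cpu(int cpu)
{
        pthread_spin_lock(&bufs[cpu].lock);
        bufs[cpu].pending++;
        pthread_spin_unlock(&bufs[cpu].lock);
}

/* Dumper path: no CPU migration, just take each buffer's lock in turn. */
static int collect_all(void)
{
        int cpu, total = 0;

        for (cpu = 0; cpu < NCPU; cpu++) {
                pthread_spin_lock(&bufs[cpu].lock);
                total += bufs[cpu].pending;
                bufs[cpu].pending = 0;
                pthread_spin_unlock(&bufs[cpu].lock);
        }
        return total;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NCPU; cpu++)
                pthread_spin_init(&bufs[cpu].lock, PTHREAD_PROCESS_PRIVATE);

        log_on_cpu(1);
        log_on_cpu(3);
        printf("collected %d entries\n", collect_all());
        return 0;
}

Build with cc -pthread. The trade-off matches the commit message: taking a per-buffer lock on the logging path is a little less simple than binding the dumper to each CPU, but it avoids the GPL-only symbol entirely.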
@@ -157,6 +157,15 @@ union trace_data_union {
 		unsigned short          tcd_type;
 		/* The factors to share debug memory. */
 		unsigned short          tcd_pages_factor;
+
+		/*
+		 * This spinlock is needed to workaround the problem of
+		 * set_cpus_allowed() being GPL-only. Since we cannot
+		 * schedule a thread on a specific CPU when dumping the
+		 * pages, we must use the spinlock for mutual exclusion.
+		 */
+		spinlock_t              tcd_lock;
+		unsigned long           tcd_lock_flags;
 	} tcd;
 	char __pad[L1_CACHE_ALIGN(sizeof(struct trace_cpu_data))];
 };
@@ -168,9 +177,9 @@ extern union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS];
 	for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd); \
 	     j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
 
-#define tcd_for_each_type_lock(tcd, i) \
+#define tcd_for_each_type_lock(tcd, i, cpu) \
 	for (i = 0; trace_data[i] && \
-	     (tcd = &(*trace_data[i])[smp_processor_id()].tcd) && \
+	     (tcd = &(*trace_data[i])[cpu].tcd) && \
 	     trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
 
 struct trace_page {
@@ -40,6 +40,7 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/interrupt.h>
+#include <linux/spinlock.h>
 #include <sys/sysmacros.h>
 #include <sys/proc.h>
 #include <sys/debug.h>
@@ -424,35 +425,12 @@ trace_put_console_buffer(char *buffer)
 	put_cpu();
 }
 
-static struct trace_cpu_data *
-trace_get_tcd(void)
-{
-	int cpu;
-
-	cpu = get_cpu();
-	if (in_irq())
-		return &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
-	else if (in_softirq())
-		return &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
-
-	return &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
-}
-
-static void
-trace_put_tcd (struct trace_cpu_data *tcd)
-{
-	put_cpu();
-}
-
 static int
 trace_lock_tcd(struct trace_cpu_data *tcd)
 {
 	__ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
 
-	if (tcd->tcd_type == TCD_TYPE_IRQ)
-		local_irq_disable();
-	else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-		local_bh_disable();
+	spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
 
 	return 1;
 }
@@ -462,10 +440,34 @@ trace_unlock_tcd(struct trace_cpu_data *tcd)
 {
 	__ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
 
-	if (tcd->tcd_type == TCD_TYPE_IRQ)
-		local_irq_enable();
-	else if (tcd->tcd_type == TCD_TYPE_SOFTIRQ)
-		local_bh_enable();
+	spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+}
+
+static struct trace_cpu_data *
+trace_get_tcd(void)
+{
+	int cpu;
+	struct trace_cpu_data *tcd;
+
+	cpu = get_cpu();
+	if (in_irq())
+		tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
+	else if (in_softirq())
+		tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
+	else
+		tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
+
+	trace_lock_tcd(tcd);
+
+	return tcd;
+}
+
+static void
+trace_put_tcd (struct trace_cpu_data *tcd)
+{
+	trace_unlock_tcd(tcd);
+
+	put_cpu();
 }
 
 static void
@@ -525,23 +527,6 @@ trace_max_debug_mb(void)
 	return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
 }
 
-static void
-trace_call_on_all_cpus(void (*fn)(void *arg), void *arg)
-{
-	cpumask_t mask, cpus_allowed = current->cpus_allowed;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		cpus_clear(mask);
-		cpu_set(cpu, mask);
-		set_cpus_allowed(current, mask);
-
-		fn(arg);
-
-		set_cpus_allowed(current, cpus_allowed);
-	}
-}
-
 static struct trace_page *
 tage_alloc(int gfp)
 {
@@ -861,17 +846,18 @@ collect_pages_from_single_cpu(struct page_collection *pc)
 }
 
 static void
-collect_pages_on_cpu(void *info)
+collect_pages_on_all_cpus(struct page_collection *pc)
 {
 	struct trace_cpu_data *tcd;
-	struct page_collection *pc = info;
-	int i;
+	int i, cpu;
 
 	spin_lock(&pc->pc_lock);
-	tcd_for_each_type_lock(tcd, i) {
+	for_each_possible_cpu(cpu) {
+		tcd_for_each_type_lock(tcd, i, cpu) {
 		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
 		tcd->tcd_cur_pages = 0;
 	}
+	}
 	spin_unlock(&pc->pc_lock);
 }
 
@@ -883,34 +869,38 @@ collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
 	if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
 		collect_pages_from_single_cpu(pc);
 	else
-		trace_call_on_all_cpus(collect_pages_on_cpu, pc);
+		collect_pages_on_all_cpus(pc);
 }
 
 static void
-put_pages_back_on_cpu(void *info)
+put_pages_back_on_all_cpus(struct page_collection *pc)
 {
-	struct page_collection *pc = info;
 	struct trace_cpu_data *tcd;
 	struct list_head *cur_head;
 	struct trace_page *tage;
 	struct trace_page *tmp;
-	int i;
+	int i, cpu;
 
 	spin_lock(&pc->pc_lock);
-	tcd_for_each_type_lock(tcd, i) {
+	for_each_possible_cpu(cpu) {
+		tcd_for_each_type_lock(tcd, i, cpu) {
 		cur_head = tcd->tcd_pages.next;
 
-		list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
+		list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
+					 linkage) {
 
 			__ASSERT_TAGE_INVARIANT(tage);
 
-			if (tage->cpu != smp_processor_id() || tage->type != i)
+			if (tage->cpu != cpu || tage->type != i)
 				continue;
 
 			tage_to_tail(tage, cur_head);
 			tcd->tcd_cur_pages++;
 		}
 	}
+	}
 
 	spin_unlock(&pc->pc_lock);
 }
 
@@ -918,7 +908,7 @@ static void
 put_pages_back(struct page_collection *pc)
 {
 	if (!spl_panic_in_progress)
-		trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
+		put_pages_back_on_all_cpus(pc);
 }
 
 static struct file *
@@ -1177,6 +1167,7 @@ trace_init(int max_pages)
 	}
 
 	tcd_for_each(tcd, i, j) {
+		spin_lock_init(&tcd->tcd_lock);
 		tcd->tcd_pages_factor = pages_factor[i];
 		tcd->tcd_type = i;
 		tcd->tcd_cpu = j;
@@ -1231,17 +1222,19 @@ debug_init(void)
 }
 
 static void
-trace_cleanup_on_cpu(void *info)
+trace_cleanup_on_all_cpus(void)
 {
 	struct trace_cpu_data *tcd;
 	struct trace_page *tage;
 	struct trace_page *tmp;
-	int i;
+	int i, cpu;
 
-	tcd_for_each_type_lock(tcd, i) {
+	for_each_possible_cpu(cpu) {
+		tcd_for_each_type_lock(tcd, i, cpu) {
 		tcd->tcd_shutting_down = 1;
 
-		list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
+		list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
+					 linkage) {
 			__ASSERT_TAGE_INVARIANT(tage);
 
 			list_del(&tage->linkage);
@@ -1250,13 +1243,14 @@ trace_cleanup_on_cpu(void *info)
 		tcd->tcd_cur_pages = 0;
 	}
 }
+}
 
 static void
 trace_fini(void)
 {
 	int i, j;
 
-	trace_call_on_all_cpus(trace_cleanup_on_cpu, NULL);
+	trace_cleanup_on_all_cpus();
 
 	for (i = 0; i < num_possible_cpus(); i++) {
 		for (j = 0; j < 3; j++) {