Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r--  arch/powerpc/kernel/process.c | 148
1 file changed, 33 insertions(+), 115 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1237f13fed518..9ef4aea9fffe8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -154,6 +154,7 @@ unsigned long msr_check_and_set(unsigned long bits)
return newmsr;
}
+EXPORT_SYMBOL_GPL(msr_check_and_set);
void __msr_check_and_clear(unsigned long bits)
{
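
The hunk above exports msr_check_and_set() so modules can call it. Below is a minimal, hedged sketch of how a caller might bracket work with it and with __msr_check_and_clear() (both defined in this file); example_msr_bracket() and do_work_needing_msr_bits() are hypothetical names used only for illustration, not kernel APIs:

	/*
	 * Hedged sketch, not part of the patch: set the requested MSR bits
	 * (typically a no-op if they are already set), do the work that
	 * needs them, then clear them again.
	 */
	static void example_msr_bracket(unsigned long bits)
	{
		msr_check_and_set(bits);
		do_work_needing_msr_bits();	/* hypothetical placeholder */
		__msr_check_and_clear(bits);
	}
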
@@ -632,6 +633,7 @@ void do_break (struct pt_regs *regs, unsigned long address,
hw_breakpoint_disable();
/* Deliver the signal to userspace */
+ clear_siginfo(&info);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_HWBKPT;
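
The do_break() hunk zeroes the siginfo before filling in the few fields it uses; without that, unused union members and padding would carry uninitialized kernel stack bytes to userspace when the signal is delivered. A hedged sketch of the full clear-then-fill-then-deliver pattern follows; the si_addr assignment and the force_sig_info() call are the usual continuation of this function and are assumptions here, not part of the hunk shown:

	siginfo_t info;

	clear_siginfo(&info);		/* zero every field, including union padding */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = (void __user *)address;		/* assumed: the breakpoint address */
	force_sig_info(SIGTRAP, &info, current);	/* assumed: deliver to the current task */
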
@@ -845,10 +847,6 @@ bool ppc_breakpoint_available(void)
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
-#ifdef CONFIG_PPC64
-DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
-#endif
-
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
struct arch_hw_breakpoint *b)
{
@@ -1154,7 +1152,7 @@ static inline void restore_sprs(struct thread_struct *old_thread,
mtspr(SPRN_TAR, new_thread->tar);
}
- if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+ if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
old_thread->tidr != new_thread->tidr)
mtspr(SPRN_TIDR, new_thread->tidr);
#endif
@@ -1181,20 +1179,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
WARN_ON(!irqs_disabled());
-#ifdef CONFIG_PPC64
- /*
- * Collect processor utilization data per process
- */
- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
- long unsigned start_tb, current_tb;
- start_tb = old_thread->start_tb;
- cu->current_tb = current_tb = mfspr(SPRN_PURR);
- old_thread->accum_tb += (current_tb - start_tb);
- new_thread->start_tb = current_tb;
- }
-#endif /* CONFIG_PPC64 */
-
#ifdef CONFIG_PPC_BOOK3S_64
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
@@ -1437,7 +1421,7 @@ void show_regs(struct pt_regs * regs)
pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
- pr_cont("SOFTE: %ld ", regs->softe);
+ pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr))
@@ -1496,104 +1480,42 @@ int set_thread_uses_vas(void)
}
#ifdef CONFIG_PPC64
-static DEFINE_SPINLOCK(vas_thread_id_lock);
-static DEFINE_IDA(vas_thread_ida);
-
-/*
- * We need to assign a unique thread id to each thread in a process.
+/**
+ * Assign a TIDR (thread ID) for task @t and set it in the thread
+ * structure. For now, we only support setting TIDR for 'current' task.
*
- * This thread id, referred to as TIDR, and separate from the Linux's tgid,
- * is intended to be used to direct an ASB_Notify from the hardware to the
- * thread, when a suitable event occurs in the system.
+ * Since the TID value is a truncated form of its PID, it is possible
+ * (but unlikely) for 2 threads to have the same TID. In the unlikely event
+ * that 2 threads share the same TID and are waiting, one of the following
+ * cases will happen:
*
- * One such event is a "paste" instruction in the context of Fast Thread
- * Wakeup (aka Core-to-core wake up in the Virtual Accelerator Switchboard
- * (VAS) in POWER9.
+ * 1. The correct thread is running, the wrong thread is not
+ * In this situation, the correct thread is woken and proceeds to pass its
+ * condition check.
*
- * To get a unique TIDR per process we could simply reuse task_pid_nr() but
- * the problem is that task_pid_nr() is not yet available copy_thread() is
- * called. Fixing that would require changing more intrusive arch-neutral
- * code in code path in copy_process()?.
+ * 2. Neither thread is running
+ * In this situation, neither thread will be woken. When scheduled, the waiting
+ * threads will execute either a wait, which will return immediately, followed
+ * by a condition check, which will pass for the correct thread and fail
+ * for the wrong thread, or they will execute the condition check immediately.
*
- * Further, to assign unique TIDRs within each process, we need an atomic
- * field (or an IDR) in task_struct, which again intrudes into the arch-
- * neutral code. So try to assign globally unique TIDRs for now.
+ * 3. The wrong thread is running, the correct thread is not
+ * The wrong thread will be woken, but will fail its condition check and
+ * re-execute wait. The correct thread, when scheduled, will execute either
+ * its condition check (which will pass), or wait, which returns immediately
+ * when called the first time after the thread is scheduled, followed by its
+ * condition check (which will pass).
*
- * NOTE: TIDR 0 indicates that the thread does not need a TIDR value.
- * For now, only threads that expect to be notified by the VAS
- * hardware need a TIDR value and we assign values > 0 for those.
- */
-#define MAX_THREAD_CONTEXT ((1 << 16) - 1)
-static int assign_thread_tidr(void)
-{
- int index;
- int err;
- unsigned long flags;
-
-again:
- if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
- return -ENOMEM;
-
- spin_lock_irqsave(&vas_thread_id_lock, flags);
- err = ida_get_new_above(&vas_thread_ida, 1, &index);
- spin_unlock_irqrestore(&vas_thread_id_lock, flags);
-
- if (err == -EAGAIN)
- goto again;
- else if (err)
- return err;
-
- if (index > MAX_THREAD_CONTEXT) {
- spin_lock_irqsave(&vas_thread_id_lock, flags);
- ida_remove(&vas_thread_ida, index);
- spin_unlock_irqrestore(&vas_thread_id_lock, flags);
- return -ENOMEM;
- }
-
- return index;
-}
-
-static void free_thread_tidr(int id)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&vas_thread_id_lock, flags);
- ida_remove(&vas_thread_ida, id);
- spin_unlock_irqrestore(&vas_thread_id_lock, flags);
-}
-
-/*
- * Clear any TIDR value assigned to this thread.
- */
-void clear_thread_tidr(struct task_struct *t)
-{
- if (!t->thread.tidr)
- return;
-
- if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
- WARN_ON_ONCE(1);
- return;
- }
-
- mtspr(SPRN_TIDR, 0);
- free_thread_tidr(t->thread.tidr);
- t->thread.tidr = 0;
-}
-
-void arch_release_task_struct(struct task_struct *t)
-{
- clear_thread_tidr(t);
-}
-
-/*
- * Assign a unique TIDR (thread id) for task @t and set it in the thread
- * structure. For now, we only support setting TIDR for 'current' task.
+ * 4. Both threads are running
+ * Both threads will be woken. The wrong thread will fail its condition check
+ * and execute another wait, while the correct thread will pass its condition
+ * check.
+ *
+ * @t: the task to set the thread ID for
*/
int set_thread_tidr(struct task_struct *t)
{
- int rc;
-
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ if (!cpu_has_feature(CPU_FTR_P9_TIDR))
return -EINVAL;
if (t != current)
@@ -1602,11 +1524,7 @@ int set_thread_tidr(struct task_struct *t)
if (t->thread.tidr)
return 0;
- rc = assign_thread_tidr();
- if (rc < 0)
- return rc;
-
- t->thread.tidr = rc;
+ t->thread.tidr = (u16)task_pid_nr(t);
mtspr(SPRN_TIDR, t->thread.tidr);
return 0;
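
The rewritten set_thread_tidr() derives the TIDR from (u16)task_pid_nr(t), so two threads whose PIDs differ by a multiple of 65536 (for example 4464 and 70000) get the same TIDR, and a notification can reach the "wrong" one. The four cases in the new comment all reduce to the discipline sketched below; my_condition_met() and wait_for_notify() are hypothetical placeholders, not kernel or VAS interfaces:

	/*
	 * Hedged sketch: a waiter that re-checks its own condition after every
	 * wakeup is unaffected by wakeups that were really meant for another
	 * thread sharing the same truncated TIDR.
	 */
	static void wait_for_my_event(void)
	{
		while (!my_condition_met())	/* condition check */
			wait_for_notify();	/* a spurious wakeup just loops */
	}
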