x86, bts: fix wrmsr and spinlock over kmalloc
Author:     Markus Metzger <markus.t.metzger@intel.com>
AuthorDate: Tue, 25 Nov 2008 07:52:56 +0000 (08:52 +0100)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 25 Nov 2008 16:29:02 +0000 (17:29 +0100)
Impact: fix sleeping-with-spinlock-held bugs/crashes

- Turn the wrmsr used to write the DS_AREA MSR into a wrmsrl, so that the
  full 64-bit buffer address is written (wrmsr only takes 32-bit halves).
- Use the irqsave/irqrestore variants when taking ds_lock.
- Do not allocate memory while holding ds_lock (see the sketch below).
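
The allocation and locking changes follow a common pattern; below is a
minimal sketch of that pattern only, using made-up names (my_ctx, my_lock,
my_ctx_alloc) rather than the actual ds.c code: allocate with GFP_KERNEL
before taking the lock, because the allocation may sleep, then take the
lock with the irqsave variant and resolve the race with another CPU that
may have installed a context in the meantime.

/*
 * Illustrative sketch only; my_ctx, my_lock, and my_ctx_alloc are
 * made-up names, not the ds.c code.  GFP_KERNEL allocations may sleep,
 * so they happen before the lock is taken; the lock uses the irqsave
 * variant; a racing allocation by another CPU is resolved under the lock.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ctx {
        unsigned long count;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_ctx *my_global_ctx;

static struct my_ctx *my_ctx_alloc(void)
{
        struct my_ctx *ctx;
        unsigned long irq;

        /* May sleep: must not hold my_lock (or any other spinlock) here. */
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        /* irqsave variant: safe if the lock is also taken from irq context. */
        spin_lock_irqsave(&my_lock, irq);
        if (my_global_ctx) {
                /* Another CPU won the race; drop ours (kfree does not sleep). */
                kfree(ctx);
                ctx = my_global_ctx;
        } else {
                my_global_ctx = ctx;
        }
        ctx->count++;
        spin_unlock_irqrestore(&my_lock, irq);

        return ctx;
}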

Reported-by: Stephane Eranian <eranian@googlemail.com>
Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

arch/x86/kernel/ds.c

index 04e38ef..a2d1176 100644
@@ -209,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
        struct ds_context *context;
+       unsigned long irq;
 
-       spin_lock(&ds_lock);
+       spin_lock_irqsave(&ds_lock, irq);
 
        context = (task ? task->thread.ds_ctx : this_system_context);
        if (context)
                context->count++;
 
-       spin_unlock(&ds_lock);
+       spin_unlock_irqrestore(&ds_lock, irq);
 
        return context;
 }
@@ -224,55 +225,46 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
        struct ds_context **p_context =
                (task ? &task->thread.ds_ctx : &this_system_context);
        struct ds_context *context = *p_context;
+       unsigned long irq;
 
        if (!context) {
-               spin_unlock(&ds_lock);
-
                context = kzalloc(sizeof(*context), GFP_KERNEL);
-
-               if (!context) {
-                       spin_lock(&ds_lock);
+               if (!context)
                        return NULL;
-               }
 
                context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
                if (!context->ds) {
                        kfree(context);
-                       spin_lock(&ds_lock);
                        return NULL;
                }
 
-               spin_lock(&ds_lock);
-               /*
-                * Check for race - another CPU could have allocated
-                * it meanwhile:
-                */
+               spin_lock_irqsave(&ds_lock, irq);
+
                if (*p_context) {
                        kfree(context->ds);
                        kfree(context);
-                       return *p_context;
-               }
-
-               *p_context = context;
 
-               context->this = p_context;
-               context->task = task;
+                       context = *p_context;
+               } else {
+                       *p_context = context;
 
-               if (task)
-                       set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+                       context->this = p_context;
+                       context->task = task;
 
-               if (!task || (task == current))
-                       wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
+                       if (task)
+                               set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
-               get_tracer(task);
+                       if (!task || (task == current))
+                               wrmsrl(MSR_IA32_DS_AREA,
+                                      (unsigned long)context->ds);
+               }
+               spin_unlock_irqrestore(&ds_lock, irq);
        }
 
        context->count++;
@@ -286,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+       unsigned long irq;
+
        if (!context)
                return;
 
-       spin_lock(&ds_lock);
+       spin_lock_irqsave(&ds_lock, irq);
 
        if (--context->count)
                goto out;
@@ -311,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
        kfree(context->ds);
        kfree(context);
  out:
-       spin_unlock(&ds_lock);
+       spin_unlock_irqrestore(&ds_lock, irq);
 }
 
 
@@ -382,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
        struct ds_context *context;
        unsigned long buffer, adj;
        const unsigned long alignment = (1 << 3);
+       unsigned long irq;
        int error = 0;
 
        if (!ds_cfg.sizeof_ds)
@@ -396,26 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
                return -EOPNOTSUPP;
 
 
-       spin_lock(&ds_lock);
-
-       error = -ENOMEM;
        context = ds_alloc_context(task);
        if (!context)
-               goto out_unlock;
+               return -ENOMEM;
+
+       spin_lock_irqsave(&ds_lock, irq);
 
        error = -EPERM;
        if (!check_tracer(task))
                goto out_unlock;
 
+       get_tracer(task);
+
        error = -EALREADY;
        if (context->owner[qual] == current)
-               goto out_unlock;
+               goto out_put_tracer;
        error = -EPERM;
        if (context->owner[qual] != NULL)
-               goto out_unlock;
+               goto out_put_tracer;
        context->owner[qual] = current;
 
-       spin_unlock(&ds_lock);
+       spin_unlock_irqrestore(&ds_lock, irq);
 
 
        error = -ENOMEM;
@@ -463,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
        context->owner[qual] = NULL;
        ds_put_context(context);
+       put_tracer(task);
+       return error;
+
+ out_put_tracer:
+       spin_unlock_irqrestore(&ds_lock, irq);
+       ds_put_context(context);
+       put_tracer(task);
        return error;
 
  out_unlock:
-       spin_unlock(&ds_lock);
+       spin_unlock_irqrestore(&ds_lock, irq);
        ds_put_context(context);
        return error;
 }