diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4d635caab085b..082c366e4bc02 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -155,7 +155,7 @@ static __always_inline void lockdep_stage_restore(unsigned long flags)
 
 static inline void lockdep_lock(void)
 {
-	DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled());
+	DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
 
 	__this_cpu_inc(lockdep_recursion);
 	arch_spin_lock(&__lock);
@@ -164,7 +164,7 @@ static inline void lockdep_lock(void)
 
 static inline void lockdep_unlock(void)
 {
-	DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled());
+	DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
 
 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
 		return;
@@ -1228,7 +1228,7 @@ void lockdep_register_key(struct lock_class_key *key)
 		return;
 	hash_head = keyhashentry(key);
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	if (!graph_lock())
 		goto restore_irqs;
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -1239,7 +1239,7 @@ void lockdep_register_key(struct lock_class_key *key)
 out_unlock:
 	graph_unlock();
 restore_irqs:
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_register_key);
 
@@ -2092,11 +2092,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 
 	__bfs_init_root(&this, class);
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	ret = __lockdep_count_forward_deps(&this);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return ret;
 }
@@ -2118,11 +2118,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 
 	__bfs_init_root(&this, class);
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	ret = __lockdep_count_backward_deps(&this);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return ret;
 }
@@ -4276,6 +4276,8 @@ static void __trace_hardirqs_on_caller(void)
  */
 void lockdep_hardirqs_on_prepare(void)
 {
+	unsigned long flags;
+
 	if (unlikely(!debug_locks))
 		return;
 
@@ -4298,38 +4300,43 @@ void lockdep_hardirqs_on_prepare(void)
 		return;
 	}
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * We're enabling irqs and according to our state above irqs weren't
 	 * already enabled, yet we find the hardware thinks they are in fact
 	 * enabled.. someone messed up their IRQ state tracing.
 	 */
 	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
-		return;
+		goto out;
 
 	/*
 	 * See the fine text that goes along with this variable definition.
 	 */
 	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
-		return;
+		goto out;
 
 	/*
 	 * Can't allow enabling interrupts while in an interrupt handler,
 	 * that's general bad form and such. Recursion, limited stack etc..
 	 */
-	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(running_inband() && lockdep_hardirq_context()))
+		goto out;
 
 	current->hardirq_chain_key = current->curr_chain_key;
 	lockdep_recursion_inc();
 	__trace_hardirqs_on_caller();
 	lockdep_recursion_finish();
+out:
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
 
 void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
 	struct irqtrace_events *trace = &current->irqtrace;
+	unsigned long flags;
 
 	if (unlikely(!debug_locks))
 		return;
 
@@ -4367,13 +4374,15 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		return;
 	}
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * We're enabling irqs and according to our state above irqs weren't
 	 * already enabled, yet we find the hardware thinks they are in fact
 	 * enabled.. someone messed up their IRQ state tracing.
 	 */
 	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
-		return;
+		goto out;
 
 	/*
 	 * Ensure the lock stack remained unchanged between
@@ -4388,6 +4397,8 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 	trace->hardirq_enable_ip = ip;
 	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
+out:
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
@@ -4396,6 +4407,8 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
  */
 void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
+	unsigned long flags;
+
 	if (unlikely(!debug_locks))
 		return;
 
@@ -4410,12 +4423,14 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	} else if (__this_cpu_read(lockdep_recursion))
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * So we're supposed to get called after you mask local IRQs, but for
 	 * some reason the hardware doesn't quite think you did a proper job.
 	 */
 	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
-		return;
+		goto out;
 
 	if (lockdep_hardirqs_enabled()) {
 		struct irqtrace_events *trace = &current->irqtrace;
@@ -4430,6 +4445,8 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	} else {
 		debug_atomic_inc(redundant_hardirqs_off);
 	}
+out:
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
@@ -4439,20 +4456,23 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 void lockdep_softirqs_on(unsigned long ip)
 {
 	struct irqtrace_events *trace = &current->irqtrace;
+	unsigned long flags;
 
 	if (unlikely(!lockdep_enabled()))
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * We fancy IRQs being disabled here, see softirq.c, avoids
 	 * funny state and nesting things.
 	 */
 	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
-		return;
+		goto out;
 
 	if (current->softirqs_enabled) {
 		debug_atomic_inc(redundant_softirqs_on);
-		return;
+		goto out;
 	}
 
 	lockdep_recursion_inc();
@@ -4471,6 +4491,8 @@ void lockdep_softirqs_on(unsigned long ip)
 	if (lockdep_hardirqs_enabled())
 		mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
 	lockdep_recursion_finish();
+out:
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -4478,14 +4500,18 @@ void lockdep_softirqs_on(unsigned long ip)
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
+	unsigned long flags;
+
 	if (unlikely(!lockdep_enabled()))
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * We fancy IRQs being disabled here, see softirq.c
 	 */
 	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
-		return;
+		goto out;
 
 	if (current->softirqs_enabled) {
 		struct irqtrace_events *trace = &current->irqtrace;
@@ -4503,6 +4529,8 @@ void lockdep_softirqs_off(unsigned long ip)
 		DEBUG_LOCKS_WARN_ON(!softirq_count());
 	} else
 		debug_atomic_inc(redundant_softirqs_off);
+out:
+	hard_cond_local_irq_restore(flags);
 }
 
 static int
@@ -4857,11 +4885,11 @@ void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
 		if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
 			return;
 
-		raw_local_irq_save(flags);
+		flags = lockdep_stage_disable();
 		lockdep_recursion_inc();
 		register_lock_class(lock, subclass, 1);
 		lockdep_recursion_finish();
-		raw_local_irq_restore(flags);
+		lockdep_stage_restore(flags);
 	}
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map_type);
@@ -5555,13 +5583,13 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
@@ -5572,13 +5600,13 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_downgrade(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_downgrade);
 
@@ -5734,13 +5762,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 	if (unlikely(!lockdep_enabled()))
 		return cookie;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	cookie = __lock_pin_lock(lock);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return cookie;
 }
@@ -5753,13 +5781,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	__lock_repin_lock(lock, cookie);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_repin_lock);
 
@@ -5770,13 +5798,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	__lock_unpin_lock(lock, cookie);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_unpin_lock);
 
@@ -5944,7 +5972,7 @@ void lockdep_reset(void)
 	unsigned long flags;
 	int i;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_init_task(current);
 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
 	nr_hardirq_chains = 0;
@@ -5953,7 +5981,7 @@ void lockdep_reset(void)
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
 		INIT_HLIST_HEAD(chainhash_table + i);
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 /* Remove a class from a lock chain. Must be called with the graph lock held. */
@@ -6130,7 +6158,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 
 	/* closed head */
@@ -6144,7 +6172,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 /*
@@ -6187,13 +6215,13 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 
 	init_data_structures_once();
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
@@ -6213,12 +6241,12 @@ static void lockdep_free_key_range_imm(void *start, unsigned long size)
 
 	init_data_structures_once();
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	__lockdep_free_key_range(pf, start, size);
 	__free_zapped_classes(pf);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 void lockdep_free_key_range(void *start, unsigned long size)
@@ -6289,7 +6317,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	unsigned long flags;
 	int locked;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	locked = graph_lock();
 	if (!locked)
 		goto out_irq;
@@ -6300,7 +6328,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	graph_unlock();
 out_irq:
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 /*
@@ -6312,12 +6340,12 @@ static void lockdep_reset_lock_imm(struct lockdep_map *lock)
 	struct pending_free *pf = delayed_free.pf;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	__lockdep_reset_lock(pf, lock);
 	__free_zapped_classes(pf);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
@@ -6350,7 +6378,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (WARN_ON_ONCE(static_obj(key)))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -6367,7 +6395,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
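
For reference, the calling convention this diff converts the graph-update paths to is sketched below. This is a usage sketch only, not code from the patch: it assumes the staged-IRQ helpers (lockdep_stage_disable()/lockdep_stage_restore() and hard_cond_local_irq_save()/hard_cond_local_irq_restore()) are introduced elsewhere in this series, and that on a kernel without an interrupt pipeline they reduce to the plain raw_local_irq_save()/raw_local_irq_restore() pair they replace. The function name example_graph_update() is hypothetical.

	/*
	 * Usage sketch only -- not part of the patch above. Assumes the
	 * staged-IRQ helpers provided elsewhere in this series; within
	 * kernel/locking/lockdep.c the graph lock still requires hard
	 * interrupts to be disabled (see the hard_irqs_disabled() check
	 * added to lockdep_lock()/lockdep_unlock()).
	 */
	static void example_graph_update(void)
	{
		unsigned long flags;

		flags = lockdep_stage_disable();	/* was: raw_local_irq_save(flags) */
		lockdep_lock();				/* take the graph lock, hard IRQs off */
		/* ... inspect or modify the lockdep dependency graph ... */
		lockdep_unlock();
		lockdep_stage_restore(flags);		/* was: raw_local_irq_restore(flags) */
	}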