1
0

Merge tag 'v3.12-rc4' into sched/core

Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree
before applying more scheduler patches.

Conflicts:
	arch/avr32/include/asm/Kbuild

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar
2013-10-09 12:36:13 +02:00
893 changed files with 8766 additions and 4841 deletions

View File

@@ -14,6 +14,8 @@
/* Nibble-to-ASCII lookup table: index 0-15 yields the lower-case hex digit. */
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
/* Upper-case counterpart of hex_asc (same 0-15 indexing). */
const char hex_asc_upper[] = "0123456789ABCDEF";
EXPORT_SYMBOL(hex_asc_upper);
/**
* hex_to_bin - convert a hex digit to its real value

View File

@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
bool kobj_ns_current_may_mount(enum kobj_ns_type type)
{
bool may_mount = false;
if (type == KOBJ_NS_TYPE_NONE)
return true;
bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&

View File

@@ -3,6 +3,22 @@
#ifdef CONFIG_CMPXCHG_LOCKREF
/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 *
 * Fallback: if the architecture does not define a relaxed variant,
 * use the fully-ordered cmpxchg64 instead.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif
/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 *
 * Fallback: plain cpu_relax() when no arch-specific override exists.
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif
/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
@@ -14,12 +30,13 @@
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
struct lockref new = old, prev = old; \
CODE \
old.lock_count = cmpxchg(&lockref->lock_count, \
old.lock_count, new.lock_count); \
old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
old.lock_count, \
new.lock_count); \
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
cpu_relax(); \
arch_mutex_cpu_relax(); \
} \
} while (0)