Diffstat (limited to 'include')
-rw-r--r--   include/linux/dcache.h    19
-rw-r--r--   include/linux/lockref.h   71
2 files changed, 80 insertions, 10 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4a12532da8c4..efdc94434c30 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
+#include <linux/lockref.h>
struct nameidata;
struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
# endif
#endif
+#define d_lock d_lockref.lock
+
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
/* Ref lookup also touches following */
- unsigned int d_count; /* protected by d_lock */
- spinlock_t d_lock; /* per dentry lock */
+ struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
assert_spin_locked(&dentry->d_lock);
if (!read_seqcount_retry(&dentry->d_seq, seq)) {
ret = 1;
- dentry->d_count++;
+ dentry->d_lockref.count++;
}
return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
static inline unsigned d_count(const struct dentry *dentry)
{
- return dentry->d_count;
+ return dentry->d_lockref.count;
}
/* validate "insecure" dentry pointer */
@@ -357,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
if (dentry)
- dentry->d_count++;
+ dentry->d_lockref.count++;
return dentry;
}
static inline struct dentry *dget(struct dentry *dentry)
{
- if (dentry) {
- spin_lock(&dentry->d_lock);
- dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (dentry)
+ lockref_get(&dentry->d_lockref);
return dentry;
}
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+ spinlock_t lock;
+ unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if the count was incremented, 0 if the count was 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+ int retval = 0;
+
+ spin_lock(&lockref->lock);
+ if (lockref->count) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless the count is <= 1 before the decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if the count was decremented, 0 if the count was <= 1 and the lock was taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ if (lockref->count <= 1)
+ return 0;
+ lockref->count--;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
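
The header comment above points at implementations that avoid taking the spinlock on the common paths by doing a cmpxchg that covers both the lock and the count word. A minimal userspace C11 model of that fast path follows; the packed layout (low bit standing in for the spinlock, upper bits holding the count), the names, and the fallback policy are invented for illustration and are not taken from this patch or from any kernel code.

#include <stdatomic.h>
#include <stdint.h>

/* Model: lock state and count packed into one atomically updated word. */
typedef _Atomic uint64_t modeled_lockref;

#define MODEL_LOCKED	1ull	/* low bit: "spinlock is held" */
#define MODEL_COUNT_ONE	2ull	/* count occupies the upper 63 bits */

/*
 * Try to increment the count without ever touching the lock. This only
 * succeeds while the lock is observed free; the cmpxchg fails if either
 * the count or the lock bit changed underneath us, which preserves the
 * "semantically as if the spinlock had been taken" guarantee.
 */
static int modeled_lockref_get(modeled_lockref *lr)
{
	uint64_t old = atomic_load(lr);

	while (!(old & MODEL_LOCKED)) {
		/* On failure, old is reloaded and the lock bit re-checked. */
		if (atomic_compare_exchange_weak(lr, &old, old + MODEL_COUNT_ONE))
			return 1;	/* fast path: lock never taken */
	}
	return 0;	/* lock held: a real implementation falls back to spin_lock */
}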
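
lockref_put_or_lock() has a deliberately asymmetric contract: on a 1 return the count was simply decremented, but on a 0 return nothing was decremented and the function returns with the spinlock held, so the caller can deal with the final reference under the lock. A hedged usage sketch, with struct my_obj and its teardown path invented for illustration (not from this patch or from fs/dcache.c):

#include <linux/lockref.h>
#include <linux/slab.h>

struct my_obj {
	struct lockref ref;
	/* ... payload ... */
};

/* Hypothetical release path for an object refcounted by a lockref. */
static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;			/* dropped one of several references */

	/*
	 * The count was <= 1 and we now hold obj->ref.lock, so no other
	 * path can take a new reference: finish teardown under the lock.
	 */
	obj->ref.count = 0;
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}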