path: root/arch/tile/gxio/dma_queue.c
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/string.h>
#include <gxio/dma_queue.h>

/*
 * Wait for a memory read to complete.  Using the loaded value as a
 * register source stalls instruction issue until the load returns, and
 * keeps the compiler from reordering the access past this point.
 */
#define wait_for_value(val)                             \
  __asm__ __volatile__("move %0, %0" :: "r"(val))

/* The ring index is in the low 16 bits. */
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)

/*
 * The hardware descriptor-ring type.
 * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
 * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
 * See those types for more documentation on the individual fields.
 */
typedef union {
	struct {
#ifndef __BIG_ENDIAN__
		uint64_t ring_idx:16;
		uint64_t count:16;
		uint64_t gen:1;
		uint64_t __reserved:31;
#else
		uint64_t __reserved:31;
		uint64_t gen:1;
		uint64_t count:16;
		uint64_t ring_idx:16;
#endif
	};
	uint64_t word;
} __gxio_ring_t;
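
/*
 * For example (an illustrative value, assuming the little-endian field
 * layout): a word of 0x0000000100050010 decodes as ring_idx = 0x0010,
 * count = 0x0005, gen = 1.
 */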

void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
			   void *post_region_addr, unsigned int num_entries)
{
	/*
	 * Limit 65536-entry rings to 65535 credits because we only have a
	 * 16-bit completion counter.
	 */
	int64_t credits = (num_entries < 65536) ? num_entries : 65535;

	memset(dma_queue, 0, sizeof(*dma_queue));

	dma_queue->post_region_addr = post_region_addr;
	dma_queue->hw_complete_count = 0;
	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
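
/*
 * Illustrative call (a sketch only: the MMIO mapping of the post region
 * comes from the owning mpipe or trio context, and "post_region" here
 * is a hypothetical pointer, not part of this API):
 *
 *	__gxio_dma_queue_t queue;
 *	__gxio_dma_queue_init(&queue, post_region, 512);
 */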

void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without taking the cache line
	 * locally: cmpxchg(&x, -1, -1) returns the current value without
	 * modifying it (the compare either fails, or stores back the
	 * value that is already there), so we later avoid having to evict
	 * any sharers of this cache line when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count, -1, -1);

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the hardware's 16-bit count of completed commands. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
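	/*
	 * Worked example (illustrative numbers): if the low 16 bits of
	 * orig_hw_complete_count are 0xfffe and the hardware now reports
	 * count == 0x0005, then (0x0005 - 0xfffe) & 0xffff == 7 new
	 * completions, correctly handling the 16-bit wraparound.
	 */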
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;

	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);

/*
 * A separate 'blocked' method for put() so that backtraces and
 * profiles will clearly indicate that we're wasting time spinning on
 * egress availability rather than actually posting commands.
 */
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
					  int64_t modifier)
{
	int backoff = 16;
	int64_t old;

	do {
		int i;
		/* Back off to avoid spamming memory networks. */
		for (i = backoff; i > 0; i--)
			__insn_mfspr(SPR_PASS);

		/*
		 * Check credits again.  fetchaddgez commits the add only
		 * when the result is non-negative, so a negative sum in
		 * the loop test below means no credits were consumed.
		 */
		__gxio_dma_queue_update_credits(dma_queue);
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);

		/* Calculate bounded exponential backoff for next iteration. */
		if (backoff < 256)
			backoff *= 2;
	} while (old + modifier < 0);

	return old;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);

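/*
 * Out-of-line wrapper around __gxio_dma_queue_reserve() so that slots
 * can be reserved through an exported symbol from other modules.
 */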
int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
				     unsigned int num, int wait)
{
	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);

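/*
 * Return nonzero once the hardware has completed the given slot.  If
 * "update" is set, refresh the cached 64-bit completion count from the
 * hardware first, unless the cached value already shows completion.
 */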
int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
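
/*
 * Typical flow (a sketch only: posting the descriptor itself is done
 * by the per-device gxio code, and post_descriptor() is hypothetical):
 *
 *	int64_t slot = __gxio_dma_queue_reserve_aux(&queue, 1, 1);
 *	if (slot >= 0) {
 *		post_descriptor(&queue, slot);
 *		while (!__gxio_dma_queue_is_complete(&queue, slot, 1))
 *			cpu_relax();
 *	}
 */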