/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * Per-cpu non-secure contexts used to program the architectural state prior
 * to returning to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants. Using PSCI_NUM_AFFS would be
 * overkill.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * In a system, a certain number of affinity instances are present at each
 * affinity level. The cumulative number of instances across all levels is
 * stored in 'psci_aff_map'; the topology tree has been flattened into this
 * array. To retrieve a node, the extents of each affinity level, i.e. its
 * start and end indices, must be known. 'psci_aff_limits' stores this
 * information.
 ******************************************************************************/
static aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];

/*******************************************************************************
 * 'psci_ns_einfo_idx' keeps track of the next free index in the
 * 'psci_ns_entry_info' & 'psci_suspend_context' arrays.
 ******************************************************************************/
static unsigned int psci_ns_einfo_idx;

/*******************************************************************************
 * Routines for retrieving the node corresponding to an affinity level instance
 * in the mpidr. The first one uses binary search to find the node corresponding
 * to the mpidr (key) at a particular affinity level. The second routine
 * decides the extents of the binary search at each affinity level.
 ******************************************************************************/
static int psci_aff_map_get_idx(unsigned long key,
				int min_idx,
				int max_idx)
{
	int mid;

	/*
	 * Terminating condition: If the max and min indices have crossed paths
	 * during the binary search then the key has not been found.
	 */
	if (max_idx < min_idx)
		return PSCI_E_INVALID_PARAMS;

	/*
	 * Bisect the array around 'mid' and then recurse into the array chunk
	 * where the key is likely to be found. The mpidrs in each node in the
	 * 'psci_aff_map' for a given affinity level are stored in an ascending
	 * order which makes the binary search possible.
	 */
	mid = min_idx + ((max_idx - min_idx) >> 1);	/* Divide by 2 */
	if (psci_aff_map[mid].mpidr > key)
		return psci_aff_map_get_idx(key, min_idx, mid - 1);
	else if (psci_aff_map[mid].mpidr < key)
		return psci_aff_map_get_idx(key, mid + 1, max_idx);
	else
		return mid;
}
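
/*
 * Illustrative trace, assuming the usual 8-bit affinity fields in an mpidr and
 * the two cluster, 4 cpus per cluster layout described before psci_setup()
 * below: looking up cpu 2 of cluster 1 (mpidr 0x102) at affinity level 0
 * searches the index range [2, 9]:
 *
 *	mid = 5: psci_aff_map[5].mpidr == 0x003 < 0x102, recurse into [6, 9]
 *	mid = 7: psci_aff_map[7].mpidr == 0x101 < 0x102, recurse into [8, 9]
 *	mid = 8: psci_aff_map[8].mpidr == 0x102, key found, return 8
 */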

aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
{
	int rc;

	/* Right shift the mpidr to the required affinity level */
	mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);

	rc = psci_aff_map_get_idx(mpidr,
				  psci_aff_limits[aff_lvl].min,
				  psci_aff_limits[aff_lvl].max);
	if (rc >= 0)
		return &psci_aff_map[rc];
	else
		return NULL;
}
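
/*
 * Illustrative usage; the exact call site is an assumption, not code from this
 * file. The affinity level 1 node of the calling cpu could be retrieved with
 *
 *	node = psci_get_aff_map_node(read_mpidr() & MPIDR_AFFINITY_MASK,
 *				     MPIDR_AFFLVL1);
 *
 * A NULL return means the requested level is not part of the topology tree.
 */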

/*******************************************************************************
 * This function populates an array with nodes corresponding to a given range of
 * affinity levels in an mpidr. It returns successfully only when the affinity
 * level range is valid, every requested level exists in the topology tree and
 * the affinity instance at level 0 is present.
 ******************************************************************************/
int psci_get_aff_map_nodes(unsigned long mpidr,
			   int start_afflvl,
			   int end_afflvl,
			   mpidr_aff_map_nodes_t mpidr_nodes)
{
	int rc = PSCI_E_INVALID_PARAMS, level;
	aff_map_node_t *node;

	rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	for (level = start_afflvl; level <= end_afflvl; level++) {

		/*
		 * Grab the node for each affinity level. No affinity level
		 * can be missing as that would mean that the topology tree
		 * is corrupted.
		 */
		node = psci_get_aff_map_node(mpidr, level);
		if (node == NULL) {
			rc = PSCI_E_INVALID_PARAMS;
			break;
		}

		/*
		 * Skip absent affinity levels unless it's affinity level 0.
		 * An absent cpu means that the mpidr is invalid. Save the
		 * pointer to the node for the present affinity level.
		 */
		if (!(node->state & PSCI_AFF_PRESENT)) {
			if (level == MPIDR_AFFLVL0) {
				rc = PSCI_E_INVALID_PARAMS;
				break;
			}

			mpidr_nodes[level] = NULL;
		} else
			mpidr_nodes[level] = node;
	}

	return rc;
}
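
/*
 * Illustrative example, using hypothetical values from the two cluster
 * topology described before psci_setup() below: a call such as
 *
 *	mpidr_aff_map_nodes_t nodes;
 *	rc = psci_get_aff_map_nodes(0x102, MPIDR_AFFLVL0, MPIDR_AFFLVL1, nodes);
 *
 * returns PSCI_E_SUCCESS with nodes[0] pointing to the node of cpu 2 in
 * cluster 1 and nodes[1] pointing to the node of cluster 1. Entries outside
 * the requested level range are left untouched; an absent level above 0 is
 * recorded as NULL.
 */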

/*******************************************************************************
 * Function which initializes the 'aff_map_node' corresponding to an affinity
 * level instance. Each node has a unique mpidr, level and bakery lock. The data
 * field is opaque and holds affinity level specific data, e.g. for affinity
 * level 0 it contains the index into the arrays that hold the secure/non-secure
 * state of a cpu that has been turned on/off.
 ******************************************************************************/
static void psci_init_aff_map_node(unsigned long mpidr,
				   int level,
				   unsigned int idx)
{
	unsigned char state;
	uint32_t linear_id;
	psci_aff_map[idx].mpidr = mpidr;
	psci_aff_map[idx].level = level;
	bakery_lock_init(&psci_aff_map[idx].lock);

	/*
	 * If an affinity instance is present then mark it as OFF to begin with.
	 */
	state = plat_get_aff_state(level, mpidr);
	psci_aff_map[idx].state = state;

	if (level == MPIDR_AFFLVL0) {

		/*
		 * Mark the cpu as OFF. Higher affinity level reference counts
		 * have already been memset to 0
		 */
		if (state & PSCI_AFF_PRESENT)
			psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);

		/* Ensure that we have not overflowed the psci_ns_einfo array */
		assert(psci_ns_einfo_idx < PSCI_NUM_AFFS);

		psci_aff_map[idx].data = psci_ns_einfo_idx;
		/* Invalidate the suspend context for the node */
		psci_suspend_context[psci_ns_einfo_idx].power_state = PSCI_INVALID_DATA;
		psci_ns_einfo_idx++;

		/*
		 * Associate a non-secure context with this affinity
		 * instance through the context management library.
		 */
		linear_id = platform_get_core_pos(mpidr);
		assert(linear_id < PLATFORM_CORE_COUNT);

		cm_set_context(mpidr,
				(void *) &psci_ns_context[linear_id],
				NON_SECURE);

	}

	return;
}

/*******************************************************************************
 * Core routine used by the Breadth-First-Search algorithm to populate the
 * affinity tree. Each level in the tree corresponds to an affinity level. This
 * routine's aim is to traverse to the target affinity level and populate nodes
 * in the 'psci_aff_map' for all the siblings at that level. It uses the current
 * affinity level to keep track of how many levels from the root of the tree
 * have been traversed. If the current affinity level != target affinity level,
 * then the platform is asked to return the number of children that each
 * affinity instance has at the current affinity level. Traversal is then done
 * for each child at the next lower level i.e. current affinity level - 1.
 *
 * CAUTION: This routine assumes that affinity instance ids are allocated in a
 * monotonically increasing manner at each affinity level in an mpidr, starting
 * from 0. If the platform breaks this assumption then this code will have to
 * be reworked accordingly.
 ******************************************************************************/
static unsigned int psci_init_aff_map(unsigned long mpidr,
				      unsigned int affmap_idx,
				      int cur_afflvl,
				      int tgt_afflvl)
{
	unsigned int ctr, aff_count;

	assert(cur_afflvl >= tgt_afflvl);

	/*
	 * Find the number of siblings at the current affinity level &
	 * assert if there are none because then we have been invoked with
	 * an invalid mpidr.
	 */
	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
	assert(aff_count);

	if (tgt_afflvl < cur_afflvl) {
		for (ctr = 0; ctr < aff_count; ctr++) {
			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
			affmap_idx = psci_init_aff_map(mpidr,
						       affmap_idx,
						       cur_afflvl - 1,
						       tgt_afflvl);
		}
	} else {
		for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
			psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
		}

		/* affmap_idx is 1 greater than the max index of cur_afflvl */
		psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
	}

	return affmap_idx;
}
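
/*
 * Worked example: on a platform with two clusters of 4 cpus each and
 * max_afflvl == MPIDR_AFFLVL1, psci_setup() below invokes this routine once
 * per target level:
 *
 *	tgt_afflvl == 1: the two cluster nodes land at indices 0-1 and
 *			 psci_aff_limits[1].max becomes 1.
 *	tgt_afflvl == 0: the recursion descends into each cluster in turn; the
 *			 cpus of cluster 0 land at indices 2-5, those of
 *			 cluster 1 at indices 6-9 and psci_aff_limits[0].max
 *			 ends up as 9.
 */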

/*******************************************************************************
 * This function initializes the topology tree by querying the platform. To do
 * so, its helper routines implement a Breadth-First-Search. At each affinity
 * level the platform conveys the number of affinity instances that exist i.e.
 * the affinity count. The algorithm populates the psci_aff_map recursively
 * using this information. On a platform that implements two clusters of 4 cpus
 * each, the populated psci_aff_map array would look like this:
 *
 *            <- cpus cluster0 -><- cpus cluster1 ->
 * ---------------------------------------------------
 * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
 * ---------------------------------------------------
 *           ^                                       ^
 * cluster __|                                 cpu __|
 * limit                                      limit
 *
 * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
 * within cluster 0. The last 4 entries are of cpus within cluster 1.
 * The 'psci_aff_limits' array contains the max & min index of each affinity
 * level within the 'psci_aff_map' array. This allows restricting search of a
 * node at an affinity level between the indices in the limits array.
 ******************************************************************************/
int32_t psci_setup(void)
{
	unsigned long mpidr = read_mpidr();
	int afflvl, affmap_idx, max_afflvl;
	aff_map_node_t *node;

	psci_ns_einfo_idx = 0;
	psci_plat_pm_ops = NULL;

	/* Find out the maximum affinity level that the platform implements */
	max_afflvl = get_max_afflvl();
	assert(max_afflvl <= MPIDR_MAX_AFFLVL);

	/*
	 * This call traverses the topology tree with help from the platform and
	 * populates the affinity map using a breadth-first-search recursively.
	 * We assume that the platform allocates affinity instance ids from 0
	 * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
	 */
	affmap_idx = 0;
	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
		affmap_idx = psci_init_aff_map(FIRST_MPIDR,
					       affmap_idx,
					       max_afflvl,
					       afflvl);
	}

	/*
	 * Set the lower bound (min index) of each affinity level in the limits
	 * array. Also flush out the 'psci_aff_limits' array so that it's
	 * visible to subsequent power management operations. The
	 * 'psci_aff_map' array is allocated in coherent memory and so does not
	 * need flushing. The 'psci_aff_limits' array is allocated in normal
	 * memory but will be accessed when the mmu is off, e.g. after reset,
	 * hence it needs to be flushed.
	 */
	for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
		psci_aff_limits[afflvl].min =
			psci_aff_limits[afflvl + 1].max + 1;
	}

	flush_dcache_range((unsigned long) psci_aff_limits,
			   sizeof(psci_aff_limits));

	/*
	 * Mark the affinity instances in our mpidr as ON. No need to lock as
	 * this is the primary cpu.
	 */
	mpidr &= MPIDR_AFFINITY_MASK;
	for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {

		node = psci_get_aff_map_node(mpidr, afflvl);
		assert(node);

		/* Mark each present node as ON. */
		if (node->state & PSCI_AFF_PRESENT)
			psci_set_state(node, PSCI_STATE_ON);
	}

	platform_setup_pm(&psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	return 0;
}
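
/*
 * Worked example: continuing with the two cluster, 4 cpus per cluster
 * topology, after the loop above the limits array holds
 *
 *	psci_aff_limits[MPIDR_AFFLVL1] = { .min = 0, .max = 1 }
 *	psci_aff_limits[MPIDR_AFFLVL0] = { .min = 2, .max = 9 }
 *
 * (the min of the top level stays at its zero initial value since the array is
 * a static object). The min index of level 0 is thus one past the max index of
 * level 1, which is exactly the window psci_aff_map_get_idx() searches for a
 * level 0 lookup.
 */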