/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
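
/*
 * Illustrative sketch only: a platform is expected to supply the backing
 * storage for the datastore and report it to the SPMC during initialization.
 * The hook name, signature and size macro below are assumptions for
 * illustration and are not defined in this file.
 *
 *	static uint8_t plat_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *	int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *	{
 *		*datastore = plat_shmem_datastore;
 *		*size = sizeof(plat_shmem_datastore);
 *		return 0;
 *	}
 */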

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	/* Ensure that descriptor size is aligned */
	if (!is_aligned(desc_size, 16)) {
		WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
		     __func__, desc_size);
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}
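
/*
 * Usage sketch (illustrative, assuming @state carries the spinlock the SPMC
 * uses to serialize datastore access): allocation happens with the lock held,
 * and the returned pointer must not be cached across an unlock.
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
 *	if (obj == NULL) {
 *		ret = FFA_ERROR_NO_MEMORY;
 *	}
 *	...
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 *
 * After the unlock, the object must be re-found by handle via
 * spmc_shmem_obj_lookup() rather than through the stale pointer.
 */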

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj objects should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				  struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}
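
/*
 * Compaction example: freeing B from a datastore laid out as
 *
 *	| A | B | C |		(allocated = size A + B + C)
 *
 * memmoves C down over B, leaving
 *
 *	| A | C |		(allocated = size A + C)
 *
 * which is why all outstanding object pointers must be considered invalid
 * after a free, even though objects placed before @obj do not move.
 */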

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided
 *	   offset.
 *	   %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}
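
/*
 * Iteration sketch (illustrative): walking every object in the datastore,
 * as spmc_shmem_check_state_obj() below does. @offset is advanced past each
 * object by the call itself.
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	while ((cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					      &offset)) != NULL) {
 *		(inspect cur; offset already points at the next object)
 *	}
 */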

/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;

	assert(index < desc->emad_count);

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		assert(is_aligned(desc->emad_offset, 16));
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}

	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
	return (emad + (*emad_size * index));
}
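
/*
 * Layout reminder for the two descriptor versions handled above (offsets are
 * byte offsets from the start of the transaction descriptor):
 *
 *	v1.0: the emad array is embedded in struct ffa_mtd_v1_0, so
 *	      emad[i] = desc + offsetof(struct ffa_mtd_v1_0, emad)
 *	                     + i * sizeof(struct ffa_emad_v1_0);
 *
 *	v1.1: the sender supplies emad_offset/emad_size, so
 *	      emad[i] = desc + desc->emad_offset + i * desc->emad_size;
 */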

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *				 FF-A version of the descriptor.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *	   region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions, therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *				a given memory transaction.
 * @obj:        The shared memory object containing the descriptor
 *              of the memory transaction.
 * @sp_id:      Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}
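
/*
 * Worked example of the interval test above: [0x8000, 0x9000) and
 * [0x8800, 0xa000) overlap, because neither 0xa000 <= 0x8000 nor
 * 0x9000 <= 0x8800 holds. [0x8000, 0x9000) and [0x9000, 0xa000) do not
 * overlap, since region1_end == region2_start satisfies the second disjunct
 * (ranges are half-open).
 */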

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static uint64_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	uint64_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Add the size of the memory region descriptors. */
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}
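
/*
 * Example of the sum above: a descriptor with 2 endpoints and 3 constituent
 * address ranges converts to
 *
 *	size = sizeof(struct ffa_mtd)
 *	     + 2 * sizeof(struct ffa_emad_v1_0)
 *	     + sizeof(struct ffa_comp_mrd)
 *	     + 3 * sizeof(struct ffa_cons_mrd);
 *
 * Note: unlike the v1.0 direction below, this helper does not bound check
 * emad_array[0].comp_mrd_offset before dereferencing mrd, so the v1.0
 * descriptor is assumed to have been validated by the caller first.
 */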

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:	The shared memory object to populate with the converted
 *		descriptor.
 * @orig:	The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful, else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for each element of the emad array. */
		if (((uint8_t *)&emad_array_in[i] +
		     sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}
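
/*
 * Resulting v1.1 layout produced by the conversion above, in order:
 *
 *	+--------------------------+  offset 0
 *	| struct ffa_mtd           |
 *	+--------------------------+  out->emad_offset (= sizeof(ffa_mtd))
 *	| emad[0..emad_count-1]    |
 *	+--------------------------+  mrd_out_offset
 *	| struct ffa_comp_mrd      |
 *	| ffa_cons_mrd[0..n-1]     |
 *	+--------------------------+
 *
 * Each emad's comp_mrd_offset is rebased by (mrd_out_offset - mrd_in_offset)
 * so it keeps pointing at the single composite descriptor.
 */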

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                v1.0 memory object.
 * @out_obj:    The shared memory object to populate with the v1.0 descriptor.
 * @orig:       The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful, else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
				((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* emad_size is in bytes, so advance the pointer byte-wise. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		/* emad_size is in bytes, so advance the pointer byte-wise. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:	      Buffer to populate with the v1.0
 *                    ffa_memory_region_descriptor.
 * @orig_obj:	      Object containing the v1.1 ffa_memory_region_descriptor.
 * @buf_size:	      Size of the buffer to populate.
 * @offset:	      The offset of the converted descriptor to copy.
 * @copy_size:	      Will be populated with the number of bytes copied.
 * @v1_0_desc_size:   Will be populated with the total size of the v1.0
 *                    descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj, therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				 size_t buf_size, size_t offset,
				 size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
				&orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}
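
/*
 * Usage sketch (illustrative assumption based on the note above; the error
 * helper and handle variable are placeholders): a retrieve path serving a
 * v1.0 caller would do something like
 *
 *	ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
 *						buf_size, 0, &copy_size,
 *						&out_desc_size);
 *	if (ret != 0U) {
 *		return error to the caller using ret;
 *	}
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, handle);
 *
 * where the lookup re-establishes the (possibly moved) v1.1 object after the
 * temporary v1.0 copy is freed.
 */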

static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 *   ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
	 * = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
	 * = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
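
/*
 * Example of the bound computed above, for an illustrative v1.1 descriptor
 * with emad_offset = 0x30, emad_size = 0x10 and emad_count = 2:
 *
 *	emad_end = 2 * 0x10 + sizeof(struct ffa_comp_mrd) + 0x30
 *
 * which must not exceed total_length; the comp_mrd term reserves room for at
 * least the composite descriptor header that every transaction requires.
 */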

static inline const struct ffa_emad_v1_0 *
emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
{
	return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:	  Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
 * constituent_memory_region_descriptor offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	unsigned long long total_page_count;
	const struct ffa_emad_v1_0 *first_emad;
	const struct ffa_emad_v1_0 *end_emad;
	size_t emad_size;
	uint32_t comp_mrd_offset;
	size_t header_emad_size;
	size_t size;
	size_t count;
	size_t expected_size;
	const struct ffa_comp_mrd *comp;

	if (obj->desc_filled != obj->desc_size) {
		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
		      __func__, obj->desc_filled, obj->desc_size);
		panic();
	}

	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
				    obj->desc_filled, obj->desc_size)) {
		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
		      __func__);
		panic();
	}

	first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
					     ffa_version, &emad_size);
	end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
	comp_mrd_offset = first_emad->comp_mrd_offset;

	/* Loop through the endpoint descriptors, validating each of them. */
	for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
		ffa_endpoint_id16_t ep_id;

		/*
		 * If a partition ID resides in the secure world validate that
		 * the partition ID is for a known partition. Ignore any
		 * partition ID belonging to the normal world as it is assumed
		 * the Hypervisor will have validated these.
		 */
		ep_id = emad->mapd.endpoint_id;
		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors.
		 */
		if (comp_mrd_offset != emad->comp_mrd_offset) {
			ERROR("%s: mismatching offsets provided, %u != %u\n",
			       __func__, emad->comp_mrd_offset, comp_mrd_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		/* Advance to the next endpoint descriptor */
		emad = emad_advance(emad, emad_size);

		/*
		 * Ensure neither this emad nor any subsequent emads have
		 * the same partition ID as the previous emad.
		 */
		for (const struct ffa_emad_v1_0 *other_emad = emad;
		     other_emad < end_emad;
		     other_emad = emad_advance(other_emad, emad_size)) {
			if (ep_id == other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, ep_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}
	}

	header_emad_size = (size_t)((const uint8_t *)end_emad -
				    (const uint8_t *)&obj->desc);

	/*
	 * Check that the composite descriptor
	 * is after the endpoint descriptors.
	 */
	if (comp_mrd_offset < header_emad_size) {
		WARN("%s: invalid object, offset %u < header + emad %zu\n",
		     __func__, comp_mrd_offset, header_emad_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(comp_mrd_offset, 16)) {
		WARN("%s: invalid object, unaligned composite memory "
		     "region descriptor offset %u.\n",
		     __func__, comp_mrd_offset);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	size = obj->desc_size;

	/* Check that the composite descriptor is in bounds. */
	if (comp_mrd_offset > size) {
		WARN("%s: invalid object, offset %u > total size %zu\n",
		     __func__, comp_mrd_offset, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}
	size -= comp_mrd_offset;

	/* Check that there is enough space for the composite descriptor. */
	if (size < sizeof(struct ffa_comp_mrd)) {
		WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
		     __func__, comp_mrd_offset, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}
	size -= sizeof(*comp);

	count = size / sizeof(struct ffa_cons_mrd);

	comp = (const struct ffa_comp_mrd *)
	       ((const uint8_t *)(&obj->desc) + comp_mrd_offset);

	if (comp->address_range_count != count) {
		WARN("%s: invalid object, desc count %u != %zu\n",
		     __func__, comp->address_range_count, count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Ensure that the expected and actual sizes are equal. */
	expected_size = comp_mrd_offset + sizeof(*comp) +
		count * sizeof(struct ffa_cons_mrd);

	if (expected_size != obj->desc_size) {
		WARN("%s: invalid object, computed size %zu != size %zu\n",
		       __func__, expected_size, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	total_page_count = 0;

	/*
	 * comp->address_range_count is 32-bit, so 'count' must fit in a
	 * uint32_t at this point.
	 */
	for (size_t i = 0; i < count; i++) {
		const struct ffa_cons_mrd *mrd = comp->address_range_array + i;

		if (!is_aligned(mrd->address, PAGE_SIZE)) {
			WARN("%s: invalid object, address in region descriptor "
			     "%zu not 4K aligned (got 0x%016llx)\n",
			     __func__, i, (unsigned long long)mrd->address);
		}

		/*
		 * No overflow possible: total_page_count can hold at least
		 * 2^64 - 1, but will have at most 2^32 - 1 values added to
		 * it, each of which cannot exceed 2^32 - 1.
		 */
		total_page_count += mrd->page_count;
	}

	if (comp->total_page_count != total_page_count) {
		WARN("%s: invalid object, desc total_page_count %u != %llu\n",
		     __func__, comp->total_page_count, total_page_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
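
/*
 * Worked size check for the validation above: with comp_mrd_offset = 0x40
 * and desc_size = 0x80, the trailing 0x40 - sizeof(struct ffa_comp_mrd)
 * bytes must form an exact array of struct ffa_cons_mrd entries, and
 * comp->address_range_count must equal that entry count, so a descriptor
 * with spare or missing trailing bytes is rejected.
 */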

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *				regions that are currently involved in an
 *				existing memory transaction. This implies that
 *				the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
 * state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							  FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	uint32_t handle_low;
	uint32_t handle_high;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
	if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
		     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
					      fragment_length, obj->desc_size);
		if (ret != 0) {
			goto err_bad_desc;
		}
1070*91f16700Schasinglulu 
1071*91f16700Schasinglulu 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1072*91f16700Schasinglulu 		obj->desc.flags |= mtd_flag;
1073*91f16700Schasinglulu 	}
1074*91f16700Schasinglulu 
1075*91f16700Schasinglulu 	obj->desc_filled += fragment_length;
1076*91f16700Schasinglulu 
1077*91f16700Schasinglulu 	handle_low = (uint32_t)obj->desc.handle;
1078*91f16700Schasinglulu 	handle_high = obj->desc.handle >> 32;
1079*91f16700Schasinglulu 
1080*91f16700Schasinglulu 	if (obj->desc_filled != obj->desc_size) {
1081*91f16700Schasinglulu 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1082*91f16700Schasinglulu 			 handle_high, obj->desc_filled,
1083*91f16700Schasinglulu 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1084*91f16700Schasinglulu 	}
1085*91f16700Schasinglulu 
1086*91f16700Schasinglulu 	/* The full descriptor has been received, perform any final checks. */
1087*91f16700Schasinglulu 
1088*91f16700Schasinglulu 	ret = spmc_shmem_check_obj(obj, ffa_version);
1089*91f16700Schasinglulu 	if (ret != 0) {
1090*91f16700Schasinglulu 		goto err_bad_desc;
1091*91f16700Schasinglulu 	}
1092*91f16700Schasinglulu 
1093*91f16700Schasinglulu 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
1094*91f16700Schasinglulu 	if (ret) {
1095*91f16700Schasinglulu 		ERROR("%s: invalid memory region descriptor.\n", __func__);
1096*91f16700Schasinglulu 		goto err_bad_desc;
1097*91f16700Schasinglulu 	}
1098*91f16700Schasinglulu 
1099*91f16700Schasinglulu 	/*
1100*91f16700Schasinglulu 	 * Everything checks out, if the sender was using FF-A v1.0, convert
1101*91f16700Schasinglulu 	 * the descriptor format to use the v1.1 structures.
1102*91f16700Schasinglulu 	 */
1103*91f16700Schasinglulu 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1104*91f16700Schasinglulu 		struct spmc_shmem_obj *v1_1_obj;
1105*91f16700Schasinglulu 		uint64_t mem_handle;
1106*91f16700Schasinglulu 
1107*91f16700Schasinglulu 		/* Calculate the size that the v1.1 descriptor will require. */
1108*91f16700Schasinglulu 		uint64_t v1_1_desc_size =
1109*91f16700Schasinglulu 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1110*91f16700Schasinglulu 						      obj->desc_size);
1111*91f16700Schasinglulu 
1112*91f16700Schasinglulu 		if (v1_1_desc_size > UINT32_MAX) {
1113*91f16700Schasinglulu 			ret = FFA_ERROR_NO_MEMORY;
1114*91f16700Schasinglulu 			goto err_arg;
1115*91f16700Schasinglulu 		}
1116*91f16700Schasinglulu 
1117*91f16700Schasinglulu 		/* Get a new obj to store the v1.1 descriptor. */
1118*91f16700Schasinglulu 		v1_1_obj =
1119*91f16700Schasinglulu 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
1120*91f16700Schasinglulu 
1121*91f16700Schasinglulu 		if (!v1_1_obj) {
1122*91f16700Schasinglulu 			ret = FFA_ERROR_NO_MEMORY;
1123*91f16700Schasinglulu 			goto err_arg;
1124*91f16700Schasinglulu 		}
1125*91f16700Schasinglulu 
1126*91f16700Schasinglulu 		/* Perform the conversion from v1.0 to v1.1. */
1127*91f16700Schasinglulu 		v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
1128*91f16700Schasinglulu 		v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
1129*91f16700Schasinglulu 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1130*91f16700Schasinglulu 			ERROR("%s: Could not convert mtd!\n", __func__);
1131*91f16700Schasinglulu 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
1132*91f16700Schasinglulu 			ret = FFA_ERROR_INVALID_PARAMETER;
1132*91f16700Schasinglulu 			goto err_arg;
1133*91f16700Schasinglulu 		}
1134*91f16700Schasinglulu 
1135*91f16700Schasinglulu 		/*
1136*91f16700Schasinglulu 		 * We're finished with the v1.0 descriptor so free it
1137*91f16700Schasinglulu 		 * and continue our checks with the new v1.1 descriptor.
1138*91f16700Schasinglulu 		 */
1139*91f16700Schasinglulu 		mem_handle = obj->desc.handle;
1140*91f16700Schasinglulu 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1141*91f16700Schasinglulu 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1142*91f16700Schasinglulu 		if (obj == NULL) {
1143*91f16700Schasinglulu 			ERROR("%s: Failed to find converted descriptor.\n",
1144*91f16700Schasinglulu 			     __func__);
1145*91f16700Schasinglulu 			ret = FFA_ERROR_INVALID_PARAMETER;
1146*91f16700Schasinglulu 			return spmc_ffa_error_return(smc_handle, ret);
1147*91f16700Schasinglulu 		}
1148*91f16700Schasinglulu 	}
1149*91f16700Schasinglulu 
1150*91f16700Schasinglulu 	/* Allow for platform specific operations to be performed. */
1151*91f16700Schasinglulu 	ret = plat_spmc_shmem_begin(&obj->desc);
1152*91f16700Schasinglulu 	if (ret != 0) {
1153*91f16700Schasinglulu 		goto err_arg;
1154*91f16700Schasinglulu 	}
1155*91f16700Schasinglulu 
1156*91f16700Schasinglulu 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1157*91f16700Schasinglulu 		 0, 0, 0);
1158*91f16700Schasinglulu 
1159*91f16700Schasinglulu err_bad_desc:
1160*91f16700Schasinglulu err_arg:
1161*91f16700Schasinglulu 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1162*91f16700Schasinglulu 	return spmc_ffa_error_return(smc_handle, ret);
1163*91f16700Schasinglulu }
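
/*
 * Illustrative sketch, not part of the original sources: the sender-side
 * view of the fragmentation loop handled above. The first fragment travels
 * with FFA_MEM_SHARE itself; while the SPMC answers FFA_MEM_FRAG_RX, the
 * sender stages the next chunk in its TX buffer and resumes with
 * FFA_MEM_FRAG_TX. ffa_smc() and struct example_smc_ret are hypothetical
 * stand-ins for the caller's SMC conduit; the FFA_* function IDs come from
 * ffa_svc.h.
 */
#if 0 /* example only */
struct example_smc_ret {
	uint64_t fid, a1, a2, a3;
};

extern struct example_smc_ret ffa_smc(uint32_t fid, uint64_t a1, uint64_t a2,
				      uint64_t a3, uint64_t a4);

static int example_share(const uint8_t *desc, uint32_t total_len,
			 uint32_t buf_len)
{
	uint32_t sent = MIN(total_len, buf_len);
	struct example_smc_ret r;

	/* Copy desc[0..sent) into the TX buffer, then send fragment one. */
	r = ffa_smc(FFA_MEM_SHARE_SMC32, total_len, sent, 0, 0);

	while (r.fid == FFA_MEM_FRAG_RX) {
		/* w1/w2 echo the handle, w3 the bytes received so far. */
		uint32_t next = MIN(total_len - sent, buf_len);

		/* Copy desc[sent..sent + next) into the TX buffer. */
		r = ffa_smc(FFA_MEM_FRAG_TX, r.a1, r.a2, next, 0);
		sent += next;
	}

	return (r.fid == FFA_SUCCESS_SMC32) ? 0 : -1;
}
#endif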
1164*91f16700Schasinglulu 
1165*91f16700Schasinglulu /**
1166*91f16700Schasinglulu  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1167*91f16700Schasinglulu  * @client:             Client state.
1168*91f16700Schasinglulu  * @total_length:       Total length of shared memory descriptor.
1169*91f16700Schasinglulu  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1170*91f16700Schasinglulu  *                      this call.
1171*91f16700Schasinglulu  * @address:            Not supported, must be 0.
1172*91f16700Schasinglulu  * @page_count:         Not supported, must be 0.
1173*91f16700Schasinglulu  * @smc_handle:         Handle passed to smc call. Used to return
1174*91f16700Schasinglulu  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1175*91f16700Schasinglulu  *
1176*91f16700Schasinglulu  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1177*91f16700Schasinglulu  * to share or lend memory from the non-secure OS to the secure OS (with no stream
1178*91f16700Schasinglulu  * endpoints).
1179*91f16700Schasinglulu  *
1180*91f16700Schasinglulu  * Return: @smc_handle on success, error code on failure.
1181*91f16700Schasinglulu  */
1182*91f16700Schasinglulu long spmc_ffa_mem_send(uint32_t smc_fid,
1183*91f16700Schasinglulu 			bool secure_origin,
1184*91f16700Schasinglulu 			uint64_t total_length,
1185*91f16700Schasinglulu 			uint32_t fragment_length,
1186*91f16700Schasinglulu 			uint64_t address,
1187*91f16700Schasinglulu 			uint32_t page_count,
1188*91f16700Schasinglulu 			void *cookie,
1189*91f16700Schasinglulu 			void *handle,
1190*91f16700Schasinglulu 			uint64_t flags)
1191*91f16700Schasinglulu 
1192*91f16700Schasinglulu {
1193*91f16700Schasinglulu 	long ret;
1194*91f16700Schasinglulu 	struct spmc_shmem_obj *obj;
1195*91f16700Schasinglulu 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1196*91f16700Schasinglulu 	ffa_mtd_flag32_t mtd_flag;
1197*91f16700Schasinglulu 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1198*91f16700Schasinglulu 	size_t min_desc_size;
1199*91f16700Schasinglulu 
1200*91f16700Schasinglulu 	if (address != 0U || page_count != 0U) {
1201*91f16700Schasinglulu 		WARN("%s: custom memory region for message not supported.\n",
1202*91f16700Schasinglulu 		     __func__);
1203*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1204*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1205*91f16700Schasinglulu 	}
1206*91f16700Schasinglulu 
1207*91f16700Schasinglulu 	if (secure_origin) {
1208*91f16700Schasinglulu 		WARN("%s: unsupported share direction.\n", __func__);
1209*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1210*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1211*91f16700Schasinglulu 	}
1212*91f16700Schasinglulu 
1213*91f16700Schasinglulu 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1214*91f16700Schasinglulu 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1215*91f16700Schasinglulu 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
1216*91f16700Schasinglulu 		min_desc_size = sizeof(struct ffa_mtd);
1217*91f16700Schasinglulu 	} else {
1218*91f16700Schasinglulu 		WARN("%s: bad FF-A version.\n", __func__);
1219*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1220*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1221*91f16700Schasinglulu 	}
1222*91f16700Schasinglulu 
1223*91f16700Schasinglulu 	/* Check if the descriptor is too small for the FF-A version. */
1224*91f16700Schasinglulu 	if (fragment_length < min_desc_size) {
1225*91f16700Schasinglulu 		WARN("%s: bad first fragment size %u < %zu\n",
1226*91f16700Schasinglulu 		     __func__, fragment_length, min_desc_size);
1227*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1228*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1229*91f16700Schasinglulu 	}
1230*91f16700Schasinglulu 
1231*91f16700Schasinglulu 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1232*91f16700Schasinglulu 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1233*91f16700Schasinglulu 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1234*91f16700Schasinglulu 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1235*91f16700Schasinglulu 	} else {
1236*91f16700Schasinglulu 		WARN("%s: invalid memory management operation.\n", __func__);
1237*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1238*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1239*91f16700Schasinglulu 	}
1240*91f16700Schasinglulu 
1241*91f16700Schasinglulu 	spin_lock(&spmc_shmem_obj_state.lock);
1242*91f16700Schasinglulu 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1243*91f16700Schasinglulu 	if (obj == NULL) {
1244*91f16700Schasinglulu 		ret = FFA_ERROR_NO_MEMORY;
1245*91f16700Schasinglulu 		goto err_unlock;
1246*91f16700Schasinglulu 	}
1247*91f16700Schasinglulu 
1248*91f16700Schasinglulu 	spin_lock(&mbox->lock);
1249*91f16700Schasinglulu 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1250*91f16700Schasinglulu 				 ffa_version, handle);
1251*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1252*91f16700Schasinglulu 
1253*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1254*91f16700Schasinglulu 	return ret;
1255*91f16700Schasinglulu 
1256*91f16700Schasinglulu err_unlock:
1257*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1258*91f16700Schasinglulu 	return spmc_ffa_error_return(handle, ret);
1259*91f16700Schasinglulu }
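
/*
 * Illustrative sketch, not part of the original sources: the minimal v1.1
 * transaction descriptor a normal-world sender would stage in its TX
 * buffer before invoking the FFA_MEM_SHARE handler above. Only the fields
 * this handler validates are shown; the emad array and composite
 * descriptor that must follow the header are elided.
 */
#if 0 /* example only */
static void example_build_mtd(struct ffa_mtd *mtd, uint16_t sender_vm_id)
{
	memset(mtd, 0, sizeof(*mtd));
	mtd->sender_id = sender_vm_id;		/* must be a normal-world ID */
	mtd->memory_region_attributes = 0U;	/* NS bit must be zero */
	mtd->flags = 0U;			/* type is set by the handler */
	mtd->tag = 0U;
	mtd->emad_count = 1U;			/* a single borrower */
}
#endif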
1260*91f16700Schasinglulu 
1261*91f16700Schasinglulu /**
1262*91f16700Schasinglulu  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1263*91f16700Schasinglulu  * @client:             Client state.
1264*91f16700Schasinglulu  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1265*91f16700Schasinglulu  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1266*91f16700Schasinglulu  * @fragment_length:    Length of fragments transmitted.
1267*91f16700Schasinglulu  * @sender_id:          VM ID of sender in bits [31:16].
1268*91f16700Schasinglulu  * @smc_handle:         Handle passed to smc call. Used to return
1269*91f16700Schasinglulu  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1270*91f16700Schasinglulu  *
1271*91f16700Schasinglulu  * Return: @smc_handle on success, error code on failure.
1272*91f16700Schasinglulu  */
1273*91f16700Schasinglulu long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1274*91f16700Schasinglulu 			  bool secure_origin,
1275*91f16700Schasinglulu 			  uint64_t handle_low,
1276*91f16700Schasinglulu 			  uint64_t handle_high,
1277*91f16700Schasinglulu 			  uint32_t fragment_length,
1278*91f16700Schasinglulu 			  uint32_t sender_id,
1279*91f16700Schasinglulu 			  void *cookie,
1280*91f16700Schasinglulu 			  void *handle,
1281*91f16700Schasinglulu 			  uint64_t flags)
1282*91f16700Schasinglulu {
1283*91f16700Schasinglulu 	long ret;
1284*91f16700Schasinglulu 	uint32_t desc_sender_id;
1285*91f16700Schasinglulu 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1286*91f16700Schasinglulu 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1287*91f16700Schasinglulu 
1288*91f16700Schasinglulu 	struct spmc_shmem_obj *obj;
1289*91f16700Schasinglulu 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1290*91f16700Schasinglulu 
1291*91f16700Schasinglulu 	spin_lock(&spmc_shmem_obj_state.lock);
1292*91f16700Schasinglulu 
1293*91f16700Schasinglulu 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1294*91f16700Schasinglulu 	if (obj == NULL) {
1295*91f16700Schasinglulu 		WARN("%s: invalid handle 0x%lx.\n",
1296*91f16700Schasinglulu 		     __func__, mem_handle);
1297*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1298*91f16700Schasinglulu 		goto err_unlock;
1299*91f16700Schasinglulu 	}
1300*91f16700Schasinglulu 
1301*91f16700Schasinglulu 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1302*91f16700Schasinglulu 	if (sender_id != desc_sender_id) {
1303*91f16700Schasinglulu 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1304*91f16700Schasinglulu 		     sender_id, desc_sender_id);
1305*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1306*91f16700Schasinglulu 		goto err_unlock;
1307*91f16700Schasinglulu 	}
1308*91f16700Schasinglulu 
1309*91f16700Schasinglulu 	if (obj->desc_filled == obj->desc_size) {
1310*91f16700Schasinglulu 		WARN("%s: object desc already filled, %zu\n", __func__,
1311*91f16700Schasinglulu 		     obj->desc_filled);
1312*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1313*91f16700Schasinglulu 		goto err_unlock;
1314*91f16700Schasinglulu 	}
1315*91f16700Schasinglulu 
1316*91f16700Schasinglulu 	spin_lock(&mbox->lock);
1317*91f16700Schasinglulu 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1318*91f16700Schasinglulu 				 handle);
1319*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1320*91f16700Schasinglulu 
1321*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1322*91f16700Schasinglulu 	return ret;
1323*91f16700Schasinglulu 
1324*91f16700Schasinglulu err_unlock:
1325*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1326*91f16700Schasinglulu 	return spmc_ffa_error_return(handle, ret);
1327*91f16700Schasinglulu }
1328*91f16700Schasinglulu 
1329*91f16700Schasinglulu /**
1330*91f16700Schasinglulu  * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
1331*91f16700Schasinglulu  *				      if the caller implements a version greater
1332*91f16700Schasinglulu  *				      than FF-A 1.0 or if it has requested
1333*91f16700Schasinglulu  *				      the functionality.
1334*91f16700Schasinglulu  *				      TODO: We are assuming that the caller is
1335*91f16700Schasinglulu  *				      an SP. To support retrieval from the
1336*91f16700Schasinglulu  *				      normal world this function will need to be
1337*91f16700Schasinglulu  *				      expanded accordingly.
1338*91f16700Schasinglulu  * @resp:       Descriptor populated in caller's RX buffer.
1339*91f16700Schasinglulu  * @sp_ctx:     Context of the calling SP.
1340*91f16700Schasinglulu  */
1341*91f16700Schasinglulu void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
1342*91f16700Schasinglulu 			 struct secure_partition_desc *sp_ctx)
1343*91f16700Schasinglulu {
1344*91f16700Schasinglulu 	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
1345*91f16700Schasinglulu 	    sp_ctx->ns_bit_requested) {
1346*91f16700Schasinglulu 		/*
1347*91f16700Schasinglulu 		 * Currently memory senders must reside in the normal
1348*91f16700Schasinglulu 		 * world, and we do not have the functionality to change
1349*91f16700Schasinglulu 		 * the state of memory dynamically. Therefore we can always set
1350*91f16700Schasinglulu 		 * the NS bit to 1.
1351*91f16700Schasinglulu 		 */
1352*91f16700Schasinglulu 		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1353*91f16700Schasinglulu 	}
1354*91f16700Schasinglulu }
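
/*
 * Illustrative sketch, not part of the original sources: how a v1.1 SP
 * might consume the bit set above when parsing the descriptor returned in
 * its RX buffer by FFA_MEM_RETRIEVE_RESP.
 */
#if 0 /* example only */
static bool example_mem_is_ns(const struct ffa_mtd *resp)
{
	/* With this SPMC the sender always resides in the normal world. */
	return (resp->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U;
}
#endif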
1355*91f16700Schasinglulu 
1356*91f16700Schasinglulu /**
1357*91f16700Schasinglulu  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1358*91f16700Schasinglulu  * @smc_fid:            FID of SMC
1359*91f16700Schasinglulu  * @total_length:       Total length of retrieve request descriptor if this is
1360*91f16700Schasinglulu  *                      the first call. Otherwise (unsupported) must be 0.
1361*91f16700Schasinglulu  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1362*91f16700Schasinglulu  *                      in this call. Only @fragment_length == @total_length is
1363*91f16700Schasinglulu  *                      supported by this implementation.
1364*91f16700Schasinglulu  * @address:            Not supported, must be 0.
1365*91f16700Schasinglulu  * @page_count:         Not supported, must be 0.
1366*91f16700Schasinglulu  * @smc_handle:         Handle passed to smc call. Used to return
1367*91f16700Schasinglulu  *                      FFA_MEM_RETRIEVE_RESP.
1368*91f16700Schasinglulu  *
1369*91f16700Schasinglulu  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1370*91f16700Schasinglulu  * Used by the secure OS to retrieve memory already shared by the non-secure OS.
1371*91f16700Schasinglulu  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1372*91f16700Schasinglulu  * the client must call FFA_MEM_FRAG_RX until the full response has been
1373*91f16700Schasinglulu  * received.
1374*91f16700Schasinglulu  *
1375*91f16700Schasinglulu  * Return: @handle on success, error code on failure.
1376*91f16700Schasinglulu  */
1377*91f16700Schasinglulu long
1378*91f16700Schasinglulu spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1379*91f16700Schasinglulu 			  bool secure_origin,
1380*91f16700Schasinglulu 			  uint32_t total_length,
1381*91f16700Schasinglulu 			  uint32_t fragment_length,
1382*91f16700Schasinglulu 			  uint64_t address,
1383*91f16700Schasinglulu 			  uint32_t page_count,
1384*91f16700Schasinglulu 			  void *cookie,
1385*91f16700Schasinglulu 			  void *handle,
1386*91f16700Schasinglulu 			  uint64_t flags)
1387*91f16700Schasinglulu {
1388*91f16700Schasinglulu 	int ret;
1389*91f16700Schasinglulu 	size_t buf_size;
1390*91f16700Schasinglulu 	size_t copy_size = 0;
1391*91f16700Schasinglulu 	size_t min_desc_size;
1392*91f16700Schasinglulu 	size_t out_desc_size = 0;
1393*91f16700Schasinglulu 
1394*91f16700Schasinglulu 	/*
1395*91f16700Schasinglulu 	 * Currently we are only accessing fields that are the same in both the
1396*91f16700Schasinglulu 	 * v1.0 and v1.1 mtd structs, therefore we can use a v1.1 struct directly
1397*91f16700Schasinglulu 	 * here. We only need to validate against the appropriate struct size.
1398*91f16700Schasinglulu 	 */
1399*91f16700Schasinglulu 	struct ffa_mtd *resp;
1400*91f16700Schasinglulu 	const struct ffa_mtd *req;
1401*91f16700Schasinglulu 	struct spmc_shmem_obj *obj = NULL;
1402*91f16700Schasinglulu 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1403*91f16700Schasinglulu 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1404*91f16700Schasinglulu 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1405*91f16700Schasinglulu 
1406*91f16700Schasinglulu 	if (!secure_origin) {
1407*91f16700Schasinglulu 		WARN("%s: unsupported retrieve req direction.\n", __func__);
1408*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1409*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1410*91f16700Schasinglulu 	}
1411*91f16700Schasinglulu 
1412*91f16700Schasinglulu 	if (address != 0U || page_count != 0U) {
1413*91f16700Schasinglulu 		WARN("%s: custom memory region not supported.\n", __func__);
1414*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1415*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1416*91f16700Schasinglulu 	}
1417*91f16700Schasinglulu 
1418*91f16700Schasinglulu 	spin_lock(&mbox->lock);
1419*91f16700Schasinglulu 
1420*91f16700Schasinglulu 	req = mbox->tx_buffer;
1421*91f16700Schasinglulu 	resp = mbox->rx_buffer;
1422*91f16700Schasinglulu 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1423*91f16700Schasinglulu 
1424*91f16700Schasinglulu 	if (mbox->rxtx_page_count == 0U) {
1425*91f16700Schasinglulu 		WARN("%s: buffer pair not registered.\n", __func__);
1426*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1427*91f16700Schasinglulu 		goto err_unlock_mailbox;
1428*91f16700Schasinglulu 	}
1429*91f16700Schasinglulu 
1430*91f16700Schasinglulu 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1431*91f16700Schasinglulu 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1432*91f16700Schasinglulu 		ret = FFA_ERROR_DENIED;
1433*91f16700Schasinglulu 		goto err_unlock_mailbox;
1434*91f16700Schasinglulu 	}
1435*91f16700Schasinglulu 
1436*91f16700Schasinglulu 	if (fragment_length != total_length) {
1437*91f16700Schasinglulu 		WARN("%s: fragmented retrieve request not supported.\n",
1438*91f16700Schasinglulu 		     __func__);
1439*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1440*91f16700Schasinglulu 		goto err_unlock_mailbox;
1441*91f16700Schasinglulu 	}
1442*91f16700Schasinglulu 
1443*91f16700Schasinglulu 	if (req->emad_count == 0U) {
1444*91f16700Schasinglulu 		WARN("%s: unsupported attribute desc count %u.\n",
1445*91f16700Schasinglulu 		     __func__, req->emad_count);
1446*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1447*91f16700Schasinglulu 		goto err_unlock_mailbox;
1448*91f16700Schasinglulu 	}
1449*91f16700Schasinglulu 
1450*91f16700Schasinglulu 	/* Determine the appropriate minimum descriptor size. */
1451*91f16700Schasinglulu 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1452*91f16700Schasinglulu 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1453*91f16700Schasinglulu 	} else {
1454*91f16700Schasinglulu 		min_desc_size = sizeof(struct ffa_mtd);
1455*91f16700Schasinglulu 	}
1456*91f16700Schasinglulu 	if (total_length < min_desc_size) {
1457*91f16700Schasinglulu 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1458*91f16700Schasinglulu 		     min_desc_size);
1459*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1460*91f16700Schasinglulu 		goto err_unlock_mailbox;
1461*91f16700Schasinglulu 	}
1462*91f16700Schasinglulu 
1463*91f16700Schasinglulu 	spin_lock(&spmc_shmem_obj_state.lock);
1464*91f16700Schasinglulu 
1465*91f16700Schasinglulu 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1466*91f16700Schasinglulu 	if (obj == NULL) {
1467*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1468*91f16700Schasinglulu 		goto err_unlock_all;
1469*91f16700Schasinglulu 	}
1470*91f16700Schasinglulu 
1471*91f16700Schasinglulu 	if (obj->desc_filled != obj->desc_size) {
1472*91f16700Schasinglulu 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1473*91f16700Schasinglulu 		     __func__, obj->desc_filled, obj->desc_size);
1474*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1475*91f16700Schasinglulu 		goto err_unlock_all;
1476*91f16700Schasinglulu 	}
1477*91f16700Schasinglulu 
1478*91f16700Schasinglulu 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1479*91f16700Schasinglulu 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1480*91f16700Schasinglulu 		     __func__, req->sender_id, obj->desc.sender_id);
1481*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1482*91f16700Schasinglulu 		goto err_unlock_all;
1483*91f16700Schasinglulu 	}
1484*91f16700Schasinglulu 
1485*91f16700Schasinglulu 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1486*91f16700Schasinglulu 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1487*91f16700Schasinglulu 		     __func__, req->tag, obj->desc.tag);
1488*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1489*91f16700Schasinglulu 		goto err_unlock_all;
1490*91f16700Schasinglulu 	}
1491*91f16700Schasinglulu 
1492*91f16700Schasinglulu 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1493*91f16700Schasinglulu 		WARN("%s: mismatch of endpoint counts %u != %u\n",
1494*91f16700Schasinglulu 		     __func__, req->emad_count, obj->desc.emad_count);
1495*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1496*91f16700Schasinglulu 		goto err_unlock_all;
1497*91f16700Schasinglulu 	}
1498*91f16700Schasinglulu 
1499*91f16700Schasinglulu 	/* Ensure the NS bit is set to 0 in the request. */
1500*91f16700Schasinglulu 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1501*91f16700Schasinglulu 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1502*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1503*91f16700Schasinglulu 		goto err_unlock_all;
1504*91f16700Schasinglulu 	}
1505*91f16700Schasinglulu 
1506*91f16700Schasinglulu 	if (req->flags != 0U) {
1507*91f16700Schasinglulu 		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1508*91f16700Schasinglulu 		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1509*91f16700Schasinglulu 			/*
1510*91f16700Schasinglulu 			 * If the retrieve request specifies the memory
1511*91f16700Schasinglulu 			 * transaction type, ensure it matches what we expect.
1512*91f16700Schasinglulu 			 */
1513*91f16700Schasinglulu 			WARN("%s: wrong mem transaction flags %x != %x\n",
1514*91f16700Schasinglulu 			__func__, req->flags, obj->desc.flags);
1515*91f16700Schasinglulu 			ret = FFA_ERROR_INVALID_PARAMETER;
1516*91f16700Schasinglulu 			goto err_unlock_all;
1517*91f16700Schasinglulu 		}
1518*91f16700Schasinglulu 
1519*91f16700Schasinglulu 		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1520*91f16700Schasinglulu 		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1521*91f16700Schasinglulu 			/*
1522*91f16700Schasinglulu 			 * Current implementation does not support donate and
1523*91f16700Schasinglulu 			 * it supports no other flags.
1524*91f16700Schasinglulu 			 */
1525*91f16700Schasinglulu 			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1526*91f16700Schasinglulu 			ret = FFA_ERROR_INVALID_PARAMETER;
1527*91f16700Schasinglulu 			goto err_unlock_all;
1528*91f16700Schasinglulu 		}
1529*91f16700Schasinglulu 	}
1530*91f16700Schasinglulu 
1531*91f16700Schasinglulu 	/* Validate the caller is a valid participant. */
1532*91f16700Schasinglulu 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1533*91f16700Schasinglulu 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1534*91f16700Schasinglulu 			__func__, sp_ctx->sp_id);
1535*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1536*91f16700Schasinglulu 		goto err_unlock_all;
1537*91f16700Schasinglulu 	}
1538*91f16700Schasinglulu 
1539*91f16700Schasinglulu 	/* Validate that the provided emad offsets and structures are valid. */
1540*91f16700Schasinglulu 	for (size_t i = 0; i < req->emad_count; i++) {
1541*91f16700Schasinglulu 		size_t emad_size;
1542*91f16700Schasinglulu 		struct ffa_emad_v1_0 *emad;
1543*91f16700Schasinglulu 
1544*91f16700Schasinglulu 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1545*91f16700Schasinglulu 					       &emad_size);
1546*91f16700Schasinglulu 
1547*91f16700Schasinglulu 		if ((uintptr_t) emad >= (uintptr_t)
1548*91f16700Schasinglulu 					((uint8_t *) req + total_length)) {
1549*91f16700Schasinglulu 			WARN("Invalid emad access.\n");
1550*91f16700Schasinglulu 			ret = FFA_ERROR_INVALID_PARAMETER;
1551*91f16700Schasinglulu 			goto err_unlock_all;
1552*91f16700Schasinglulu 		}
1553*91f16700Schasinglulu 	}
1554*91f16700Schasinglulu 
1555*91f16700Schasinglulu 	/*
1556*91f16700Schasinglulu 	 * Validate all the endpoints match in the case of multiple
1557*91f16700Schasinglulu 	 * borrowers. We don't mandate that the order of the borrowers
1558*91f16700Schasinglulu 	 * must match in the descriptors, therefore check whether the
1559*91f16700Schasinglulu 	 * endpoints match in any order.
1560*91f16700Schasinglulu 	 */
1561*91f16700Schasinglulu 	for (size_t i = 0; i < req->emad_count; i++) {
1562*91f16700Schasinglulu 		bool found = false;
1563*91f16700Schasinglulu 		size_t emad_size;
1564*91f16700Schasinglulu 		struct ffa_emad_v1_0 *emad;
1565*91f16700Schasinglulu 		struct ffa_emad_v1_0 *other_emad;
1566*91f16700Schasinglulu 
1567*91f16700Schasinglulu 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1568*91f16700Schasinglulu 					       &emad_size);
1569*91f16700Schasinglulu 
1570*91f16700Schasinglulu 		for (size_t j = 0; j < obj->desc.emad_count; j++) {
1571*91f16700Schasinglulu 			other_emad = spmc_shmem_obj_get_emad(
1572*91f16700Schasinglulu 					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
1573*91f16700Schasinglulu 					&emad_size);
1574*91f16700Schasinglulu 
1575*91f16700Schasinglulu 			if (req->emad_count &&
1576*91f16700Schasinglulu 			    emad->mapd.endpoint_id ==
1577*91f16700Schasinglulu 			    other_emad->mapd.endpoint_id) {
1578*91f16700Schasinglulu 				found = true;
1579*91f16700Schasinglulu 				break;
1580*91f16700Schasinglulu 			}
1581*91f16700Schasinglulu 		}
1582*91f16700Schasinglulu 
1583*91f16700Schasinglulu 		if (!found) {
1584*91f16700Schasinglulu 			WARN("%s: invalid receiver id (0x%x).\n",
1585*91f16700Schasinglulu 			     __func__, emad->mapd.endpoint_id);
1586*91f16700Schasinglulu 			ret = FFA_ERROR_INVALID_PARAMETER;
1587*91f16700Schasinglulu 			goto err_unlock_all;
1588*91f16700Schasinglulu 		}
1589*91f16700Schasinglulu 	}
1590*91f16700Schasinglulu 
1591*91f16700Schasinglulu 	mbox->state = MAILBOX_STATE_FULL;
1592*91f16700Schasinglulu 
1593*91f16700Schasinglulu 	if (req->emad_count != 0U) {
1594*91f16700Schasinglulu 		obj->in_use++;
1595*91f16700Schasinglulu 	}
1596*91f16700Schasinglulu 
1597*91f16700Schasinglulu 	/*
1598*91f16700Schasinglulu 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1599*91f16700Schasinglulu 	 * directly.
1600*91f16700Schasinglulu 	 */
1601*91f16700Schasinglulu 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1602*91f16700Schasinglulu 		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1603*91f16700Schasinglulu 							&copy_size,
1604*91f16700Schasinglulu 							&out_desc_size);
1605*91f16700Schasinglulu 		if (ret != 0U) {
1606*91f16700Schasinglulu 			ERROR("%s: Failed to process descriptor.\n", __func__);
1607*91f16700Schasinglulu 			goto err_unlock_all;
1608*91f16700Schasinglulu 		}
1609*91f16700Schasinglulu 	} else {
1610*91f16700Schasinglulu 		copy_size = MIN(obj->desc_size, buf_size);
1611*91f16700Schasinglulu 		out_desc_size = obj->desc_size;
1612*91f16700Schasinglulu 
1613*91f16700Schasinglulu 		memcpy(resp, &obj->desc, copy_size);
1614*91f16700Schasinglulu 	}
1615*91f16700Schasinglulu 
1616*91f16700Schasinglulu 	/* Set the NS bit in the response if applicable. */
1617*91f16700Schasinglulu 	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1618*91f16700Schasinglulu 
1619*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1620*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1621*91f16700Schasinglulu 
1622*91f16700Schasinglulu 	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1623*91f16700Schasinglulu 		 copy_size, 0, 0, 0, 0, 0);
1624*91f16700Schasinglulu 
1625*91f16700Schasinglulu err_unlock_all:
1626*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1627*91f16700Schasinglulu err_unlock_mailbox:
1628*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1629*91f16700Schasinglulu 	return spmc_ffa_error_return(handle, ret);
1630*91f16700Schasinglulu }
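
/*
 * Illustrative sketch, not part of the original sources: the retrieve
 * request an SP would stage in its TX buffer for the handler above. The
 * sender ID, tag and emad count must match the original share; ffa_smc()
 * is the same hypothetical conduit as in the earlier sketches.
 */
#if 0 /* example only */
static void example_retrieve(struct ffa_mtd *req, uint64_t mem_handle,
			     const struct ffa_mtd *shared)
{
	uint32_t len = sizeof(*req);	/* plus the trailing emad array */

	memset(req, 0, sizeof(*req));
	req->handle = mem_handle;
	req->sender_id = shared->sender_id;	/* must match the share */
	req->tag = shared->tag;			/* must match the share */
	req->emad_count = shared->emad_count;	/* borrowers, any order */
	/* emad entries naming this SP must follow the header. */

	/* total_length == fragment_length: fragmented requests rejected. */
	(void)ffa_smc(FFA_MEM_RETRIEVE_REQ_SMC32, len, len, 0, 0);
}
#endif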
1631*91f16700Schasinglulu 
1632*91f16700Schasinglulu /**
1633*91f16700Schasinglulu  * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1634*91f16700Schasinglulu  * @client:             Client state.
1635*91f16700Schasinglulu  * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1636*91f16700Schasinglulu  * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1637*91f16700Schasinglulu  * @fragment_offset:    Byte offset in descriptor to resume at.
1638*91f16700Schasinglulu  * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
1639*91f16700Schasinglulu  *                      hypervisor. 0 otherwise.
1640*91f16700Schasinglulu  * @smc_handle:         Handle passed to smc call. Used to return
1641*91f16700Schasinglulu  *                      FFA_MEM_FRAG_TX.
1642*91f16700Schasinglulu  *
1643*91f16700Schasinglulu  * Return: @smc_handle on success, error code on failure.
1644*91f16700Schasinglulu  */
1645*91f16700Schasinglulu long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1646*91f16700Schasinglulu 			  bool secure_origin,
1647*91f16700Schasinglulu 			  uint32_t handle_low,
1648*91f16700Schasinglulu 			  uint32_t handle_high,
1649*91f16700Schasinglulu 			  uint32_t fragment_offset,
1650*91f16700Schasinglulu 			  uint32_t sender_id,
1651*91f16700Schasinglulu 			  void *cookie,
1652*91f16700Schasinglulu 			  void *handle,
1653*91f16700Schasinglulu 			  uint64_t flags)
1654*91f16700Schasinglulu {
1655*91f16700Schasinglulu 	int ret;
1656*91f16700Schasinglulu 	void *src;
1657*91f16700Schasinglulu 	size_t buf_size;
1658*91f16700Schasinglulu 	size_t copy_size;
1659*91f16700Schasinglulu 	size_t full_copy_size;
1660*91f16700Schasinglulu 	uint32_t desc_sender_id;
1661*91f16700Schasinglulu 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1662*91f16700Schasinglulu 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1663*91f16700Schasinglulu 	struct spmc_shmem_obj *obj;
1664*91f16700Schasinglulu 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1665*91f16700Schasinglulu 
1666*91f16700Schasinglulu 	if (!secure_origin) {
1667*91f16700Schasinglulu 		WARN("%s: can only be called from the secure world.\n",
1668*91f16700Schasinglulu 		     __func__);
1669*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1670*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1671*91f16700Schasinglulu 	}
1672*91f16700Schasinglulu 
1673*91f16700Schasinglulu 	spin_lock(&spmc_shmem_obj_state.lock);
1674*91f16700Schasinglulu 
1675*91f16700Schasinglulu 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1676*91f16700Schasinglulu 	if (obj == NULL) {
1677*91f16700Schasinglulu 		WARN("%s: invalid handle 0x%lx.\n",
1678*91f16700Schasinglulu 		     __func__, mem_handle);
1679*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1680*91f16700Schasinglulu 		goto err_unlock_shmem;
1681*91f16700Schasinglulu 	}
1682*91f16700Schasinglulu 
1683*91f16700Schasinglulu 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1684*91f16700Schasinglulu 	if (sender_id != 0U && sender_id != desc_sender_id) {
1685*91f16700Schasinglulu 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1686*91f16700Schasinglulu 		     sender_id, desc_sender_id);
1687*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1688*91f16700Schasinglulu 		goto err_unlock_shmem;
1689*91f16700Schasinglulu 	}
1690*91f16700Schasinglulu 
1691*91f16700Schasinglulu 	if (fragment_offset >= obj->desc_size) {
1692*91f16700Schasinglulu 		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1693*91f16700Schasinglulu 		     __func__, fragment_offset, obj->desc_size);
1694*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1695*91f16700Schasinglulu 		goto err_unlock_shmem;
1696*91f16700Schasinglulu 	}
1697*91f16700Schasinglulu 
1698*91f16700Schasinglulu 	spin_lock(&mbox->lock);
1699*91f16700Schasinglulu 
1700*91f16700Schasinglulu 	if (mbox->rxtx_page_count == 0U) {
1701*91f16700Schasinglulu 		WARN("%s: buffer pair not registered.\n", __func__);
1702*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1703*91f16700Schasinglulu 		goto err_unlock_all;
1704*91f16700Schasinglulu 	}
1705*91f16700Schasinglulu 
1706*91f16700Schasinglulu 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1707*91f16700Schasinglulu 		WARN("%s: RX Buffer is full!\n", __func__);
1708*91f16700Schasinglulu 		ret = FFA_ERROR_DENIED;
1709*91f16700Schasinglulu 		goto err_unlock_all;
1710*91f16700Schasinglulu 	}
1711*91f16700Schasinglulu 
1712*91f16700Schasinglulu 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1713*91f16700Schasinglulu 
1714*91f16700Schasinglulu 	mbox->state = MAILBOX_STATE_FULL;
1715*91f16700Schasinglulu 
1716*91f16700Schasinglulu 	/*
1717*91f16700Schasinglulu 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1718*91f16700Schasinglulu 	 * directly.
1719*91f16700Schasinglulu 	 */
1720*91f16700Schasinglulu 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1721*91f16700Schasinglulu 		size_t out_desc_size;
1722*91f16700Schasinglulu 
1723*91f16700Schasinglulu 		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1724*91f16700Schasinglulu 							buf_size,
1725*91f16700Schasinglulu 							fragment_offset,
1726*91f16700Schasinglulu 							&copy_size,
1727*91f16700Schasinglulu 							&out_desc_size);
1728*91f16700Schasinglulu 		if (ret != 0U) {
1729*91f16700Schasinglulu 			ERROR("%s: Failed to process descriptor.\n", __func__);
1730*91f16700Schasinglulu 			goto err_unlock_all;
1731*91f16700Schasinglulu 		}
1732*91f16700Schasinglulu 	} else {
1733*91f16700Schasinglulu 		full_copy_size = obj->desc_size - fragment_offset;
1734*91f16700Schasinglulu 		copy_size = MIN(full_copy_size, buf_size);
1735*91f16700Schasinglulu 
1736*91f16700Schasinglulu 		src = &obj->desc;
1737*91f16700Schasinglulu 
1738*91f16700Schasinglulu 		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1739*91f16700Schasinglulu 	}
1740*91f16700Schasinglulu 
1741*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1742*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1743*91f16700Schasinglulu 
1744*91f16700Schasinglulu 	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1745*91f16700Schasinglulu 		 copy_size, sender_id, 0, 0, 0);
1746*91f16700Schasinglulu 
1747*91f16700Schasinglulu err_unlock_all:
1748*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1749*91f16700Schasinglulu err_unlock_shmem:
1750*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1751*91f16700Schasinglulu 	return spmc_ffa_error_return(handle, ret);
1752*91f16700Schasinglulu }
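
/*
 * Illustrative sketch, not part of the original sources: how a retriever
 * drains a descriptor larger than its RX buffer through the handler above,
 * advancing fragment_offset by the copy_size returned in w3 of each
 * FFA_MEM_FRAG_TX response. ffa_smc() is hypothetical, as before.
 */
#if 0 /* example only */
static void example_drain(uint64_t mem_handle, uint32_t first_len,
			  uint32_t total_len)
{
	uint32_t offset = first_len;	/* bytes from FFA_MEM_RETRIEVE_RESP */

	while (offset < total_len) {
		struct example_smc_ret r = ffa_smc(FFA_MEM_FRAG_RX,
						   (uint32_t)mem_handle,
						   (uint32_t)(mem_handle >> 32),
						   offset, 0);

		/* Consume r.a3 bytes from the RX buffer, then release it. */
		offset += (uint32_t)r.a3;
	}
}
#endif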
1753*91f16700Schasinglulu 
1754*91f16700Schasinglulu /**
1755*91f16700Schasinglulu  * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1756*91f16700Schasinglulu  * @client:             Client state.
1757*91f16700Schasinglulu  *
1758*91f16700Schasinglulu  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1759*91f16700Schasinglulu  * Used by the secure OS to release previously shared memory to the non-secure OS.
1760*91f16700Schasinglulu  *
1761*91f16700Schasinglulu  * The handle to release must be in the client's (secure OS's) transmit buffer.
1762*91f16700Schasinglulu  *
1763*91f16700Schasinglulu  * Return: 0 on success, error code on failure.
1764*91f16700Schasinglulu  */
1765*91f16700Schasinglulu int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1766*91f16700Schasinglulu 			    bool secure_origin,
1767*91f16700Schasinglulu 			    uint32_t handle_low,
1768*91f16700Schasinglulu 			    uint32_t handle_high,
1769*91f16700Schasinglulu 			    uint32_t fragment_offset,
1770*91f16700Schasinglulu 			    uint32_t sender_id,
1771*91f16700Schasinglulu 			    void *cookie,
1772*91f16700Schasinglulu 			    void *handle,
1773*91f16700Schasinglulu 			    uint64_t flags)
1774*91f16700Schasinglulu {
1775*91f16700Schasinglulu 	int ret;
1776*91f16700Schasinglulu 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1777*91f16700Schasinglulu 	struct spmc_shmem_obj *obj;
1778*91f16700Schasinglulu 	const struct ffa_mem_relinquish_descriptor *req;
1779*91f16700Schasinglulu 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1780*91f16700Schasinglulu 
1781*91f16700Schasinglulu 	if (!secure_origin) {
1782*91f16700Schasinglulu 		WARN("%s: unsupported relinquish direction.\n", __func__);
1783*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1784*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1785*91f16700Schasinglulu 	}
1786*91f16700Schasinglulu 
1787*91f16700Schasinglulu 	spin_lock(&mbox->lock);
1788*91f16700Schasinglulu 
1789*91f16700Schasinglulu 	if (mbox->rxtx_page_count == 0U) {
1790*91f16700Schasinglulu 		WARN("%s: buffer pair not registered.\n", __func__);
1791*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1792*91f16700Schasinglulu 		goto err_unlock_mailbox;
1793*91f16700Schasinglulu 	}
1794*91f16700Schasinglulu 
1795*91f16700Schasinglulu 	req = mbox->tx_buffer;
1796*91f16700Schasinglulu 
1797*91f16700Schasinglulu 	if (req->flags != 0U) {
1798*91f16700Schasinglulu 		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1799*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1800*91f16700Schasinglulu 		goto err_unlock_mailbox;
1801*91f16700Schasinglulu 	}
1802*91f16700Schasinglulu 
1803*91f16700Schasinglulu 	if (req->endpoint_count == 0) {
1804*91f16700Schasinglulu 		WARN("%s: endpoint count cannot be 0.\n", __func__);
1805*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1806*91f16700Schasinglulu 		goto err_unlock_mailbox;
1807*91f16700Schasinglulu 	}
1808*91f16700Schasinglulu 
1809*91f16700Schasinglulu 	spin_lock(&spmc_shmem_obj_state.lock);
1810*91f16700Schasinglulu 
1811*91f16700Schasinglulu 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1812*91f16700Schasinglulu 	if (obj == NULL) {
1813*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1814*91f16700Schasinglulu 		goto err_unlock_all;
1815*91f16700Schasinglulu 	}
1816*91f16700Schasinglulu 
1817*91f16700Schasinglulu 	/*
1818*91f16700Schasinglulu 	 * Validate the endpoint ID was populated correctly. We don't currently
1819*91f16700Schasinglulu 	 * support proxy endpoints so the endpoint count should always be 1.
1820*91f16700Schasinglulu 	 */
1821*91f16700Schasinglulu 	if (req->endpoint_count != 1U) {
1822*91f16700Schasinglulu 		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1823*91f16700Schasinglulu 		     req->endpoint_count);
1824*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1825*91f16700Schasinglulu 		goto err_unlock_all;
1826*91f16700Schasinglulu 	}
1827*91f16700Schasinglulu 
1828*91f16700Schasinglulu 	/* Validate provided endpoint ID matches the partition ID. */
1829*91f16700Schasinglulu 	if (req->endpoint_array[0] != sp_ctx->sp_id) {
1830*91f16700Schasinglulu 		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1831*91f16700Schasinglulu 		     req->endpoint_array[0], sp_ctx->sp_id);
1832*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1833*91f16700Schasinglulu 		goto err_unlock_all;
1834*91f16700Schasinglulu 	}
1835*91f16700Schasinglulu 
1836*91f16700Schasinglulu 	/* Validate the caller is a valid participant. */
1837*91f16700Schasinglulu 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1838*91f16700Schasinglulu 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1839*91f16700Schasinglulu 			__func__, req->endpoint_array[0]);
1840*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1841*91f16700Schasinglulu 		goto err_unlock_all;
1842*91f16700Schasinglulu 	}
1843*91f16700Schasinglulu 
1844*91f16700Schasinglulu 	if (obj->in_use == 0U) {
1845*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1846*91f16700Schasinglulu 		goto err_unlock_all;
1847*91f16700Schasinglulu 	}
1848*91f16700Schasinglulu 	obj->in_use--;
1849*91f16700Schasinglulu 
1850*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1851*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1852*91f16700Schasinglulu 
1853*91f16700Schasinglulu 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1854*91f16700Schasinglulu 
1855*91f16700Schasinglulu err_unlock_all:
1856*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1857*91f16700Schasinglulu err_unlock_mailbox:
1858*91f16700Schasinglulu 	spin_unlock(&mbox->lock);
1859*91f16700Schasinglulu 	return spmc_ffa_error_return(handle, ret);
1860*91f16700Schasinglulu }
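
/*
 * Illustrative sketch, not part of the original sources: the relinquish
 * descriptor an SP stages in its TX buffer for the handler above. Exactly
 * one endpoint, naming the caller itself, is accepted.
 */
#if 0 /* example only */
static void example_relinquish(struct ffa_mem_relinquish_descriptor *rel,
			       uint64_t mem_handle, uint16_t own_sp_id)
{
	rel->handle = mem_handle;
	rel->flags = 0U;		/* no flags are supported */
	rel->endpoint_count = 1U;	/* proxy endpoints unsupported */
	rel->endpoint_array[0] = own_sp_id;

	(void)ffa_smc(FFA_MEM_RELINQUISH, 0, 0, 0, 0);
}
#endif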
1861*91f16700Schasinglulu 
1862*91f16700Schasinglulu /**
1863*91f16700Schasinglulu  * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1864*91f16700Schasinglulu  * @client:         Client state.
1865*91f16700Schasinglulu  * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
1866*91f16700Schasinglulu  * @handle_high:    Unique handle of shared memory object to reclaim.
1867*91f16700Schasinglulu  *                  Bit[63:32].
1868*91f16700Schasinglulu  * @flags:          Unsupported, ignored.
1869*91f16700Schasinglulu  *
1870*91f16700Schasinglulu  * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1871*91f16700Schasinglulu  * Used by the non-secure OS to reclaim memory previously shared with the secure OS.
1872*91f16700Schasinglulu  *
1873*91f16700Schasinglulu  * Return: 0 on success, error code on failure.
1874*91f16700Schasinglulu  */
1875*91f16700Schasinglulu int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1876*91f16700Schasinglulu 			 bool secure_origin,
1877*91f16700Schasinglulu 			 uint32_t handle_low,
1878*91f16700Schasinglulu 			 uint32_t handle_high,
1879*91f16700Schasinglulu 			 uint32_t mem_flags,
1880*91f16700Schasinglulu 			 uint64_t x4,
1881*91f16700Schasinglulu 			 void *cookie,
1882*91f16700Schasinglulu 			 void *handle,
1883*91f16700Schasinglulu 			 uint64_t flags)
1884*91f16700Schasinglulu {
1885*91f16700Schasinglulu 	int ret;
1886*91f16700Schasinglulu 	struct spmc_shmem_obj *obj;
1887*91f16700Schasinglulu 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1888*91f16700Schasinglulu 
1889*91f16700Schasinglulu 	if (secure_origin) {
1890*91f16700Schasinglulu 		WARN("%s: unsupported reclaim direction.\n", __func__);
1891*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1892*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1893*91f16700Schasinglulu 	}
1894*91f16700Schasinglulu 
1895*91f16700Schasinglulu 	if (mem_flags != 0U) {
1896*91f16700Schasinglulu 		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1897*91f16700Schasinglulu 		return spmc_ffa_error_return(handle,
1898*91f16700Schasinglulu 					     FFA_ERROR_INVALID_PARAMETER);
1899*91f16700Schasinglulu 	}
1900*91f16700Schasinglulu 
1901*91f16700Schasinglulu 	spin_lock(&spmc_shmem_obj_state.lock);
1902*91f16700Schasinglulu 
1903*91f16700Schasinglulu 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1904*91f16700Schasinglulu 	if (obj == NULL) {
1905*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1906*91f16700Schasinglulu 		goto err_unlock;
1907*91f16700Schasinglulu 	}
1908*91f16700Schasinglulu 	if (obj->in_use != 0U) {
1909*91f16700Schasinglulu 		ret = FFA_ERROR_DENIED;
1910*91f16700Schasinglulu 		goto err_unlock;
1911*91f16700Schasinglulu 	}
1912*91f16700Schasinglulu 
1913*91f16700Schasinglulu 	if (obj->desc_filled != obj->desc_size) {
1914*91f16700Schasinglulu 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1915*91f16700Schasinglulu 		     __func__, obj->desc_filled, obj->desc_size);
1916*91f16700Schasinglulu 		ret = FFA_ERROR_INVALID_PARAMETER;
1917*91f16700Schasinglulu 		goto err_unlock;
1918*91f16700Schasinglulu 	}
1919*91f16700Schasinglulu 
1920*91f16700Schasinglulu 	/* Allow for platform specific operations to be performed. */
1921*91f16700Schasinglulu 	ret = plat_spmc_shmem_reclaim(&obj->desc);
1922*91f16700Schasinglulu 	if (ret != 0) {
1923*91f16700Schasinglulu 		goto err_unlock;
1924*91f16700Schasinglulu 	}
1925*91f16700Schasinglulu 
1926*91f16700Schasinglulu 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1927*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1928*91f16700Schasinglulu 
1929*91f16700Schasinglulu 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1930*91f16700Schasinglulu 
1931*91f16700Schasinglulu err_unlock:
1932*91f16700Schasinglulu 	spin_unlock(&spmc_shmem_obj_state.lock);
1933*91f16700Schasinglulu 	return spmc_ffa_error_return(handle, ret);
1934*91f16700Schasinglulu }
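
/*
 * Illustrative sketch, not part of the original sources: the normal-world
 * reclaim of a handle once every borrower has relinquished it (i.e. once
 * in_use has dropped back to zero). The 64-bit handle is split across
 * w1/w2 exactly as the handler above expects.
 */
#if 0 /* example only */
static int example_reclaim(uint64_t mem_handle)
{
	struct example_smc_ret r = ffa_smc(FFA_MEM_RECLAIM,
					   (uint32_t)mem_handle,
					   (uint32_t)(mem_handle >> 32),
					   0, 0);

	return (r.fid == FFA_SUCCESS_SMC32) ? 0 : -1;
}
#endif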
1935