/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <errno.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>

/*******************************************************************************
 * Constants and Macros
 ******************************************************************************/

#define TIMEOUT_100MS		100U	/* Timeout in 100ms */
#define RNG_AES_KEY_INDEX	1

/*******************************************************************************
 * Data structure and global variables
 ******************************************************************************/

/* The security engine contexts are formatted as follows:
 *
 * SE1 CONTEXT:
 * #--------------------------------#
 * |        Random Data   1 Block   |
 * #--------------------------------#
 * |        Sticky Bits   2 Blocks  |
 * #--------------------------------#
 * | Key Table           64 Blocks  |
 * |     For each Key (x16):        |
 * |      Key:         2 Blocks     |
 * |      Original-IV: 1 Block      |
 * |      Updated-IV:  1 Block      |
 * #--------------------------------#
 * |        RSA Keys     64 Blocks  |
 * #--------------------------------#
 * |        Known Pattern 1 Block   |
 * #--------------------------------#
 *
 * SE2/PKA1 CONTEXT:
 * #--------------------------------#
 * |        Random Data   1 Block   |
 * #--------------------------------#
 * |        Sticky Bits   2 Blocks  |
 * #--------------------------------#
 * | Key Table           64 Blocks  |
 * |     For each Key (x16):        |
 * |      Key:         2 Blocks     |
 * |      Original-IV: 1 Block      |
 * |      Updated-IV:  1 Block      |
 * #--------------------------------#
 * |        RSA Keys     64 Blocks  |
 * #--------------------------------#
 * |        PKA sticky bits 1 Block |
 * #--------------------------------#
 * |        PKA keys    512 Blocks  |
 * #--------------------------------#
 * |        Known Pattern 1 Block   |
 * #--------------------------------#
 */

/* Known pattern data for T210 */
static const uint8_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE] = {
	/* 128 bit AES block */
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
	};

/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 context buffer, 132 blocks */
static __aligned(64) uint8_t se1_ctx_buf[SE_CTX_DRBG_BUFER_SIZE];

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* Setup base address for se */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
	/* Setup context save destination */
	.ctx_save_buf = (uint32_t *)&se1_ctx_buf
};

/* SE2 security engine device handle (T210B01 only) */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* Setup base address for se */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
	/* Setup context save destination */
	.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000)
};

static bool ecid_valid;

/*******************************************************************************
 * Function Definitions
 ******************************************************************************/

static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
	flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
			sizeof(tegra_se_io_lst_t));

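	/* The destination descriptor list is flushed below as well, so that
	 * the engine fetches up-to-date copies of both linked lists from
	 * memory before the operation starts.
	 */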
	flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
			sizeof(tegra_se_io_lst_t));
}

/*
 * Check that SE operation has completed after kickoff
 * This function is invoked after an SE operation has been started,
 * and it checks the following conditions:
 * 1. SE_INT_STATUS = SE_OP_DONE
 * 2. SE_STATUS = IDLE
 * 3. AHB bus data transfer complete.
 * 4. SE_ERR_STATUS is clean.
 */
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	/* Poll the SE interrupt register to ensure H/W operation complete */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: Atomic context save operation timeout!\n",
				__func__);
		ret = -ETIMEDOUT;
	}

	/* Poll the SE status idle to ensure H/W operation complete */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
				timeout++) {
			mdelay(1);
			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: ERR: MEM_INTERFACE and SE state "
					"idle state timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Check AHB bus transfer complete */
	if (ret == 0) {
		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
				(timeout < TIMEOUT_100MS); timeout++) {
			mdelay(1);
			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: SE write over AHB timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Ensure that no errors are thrown during operation */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
		if (val != 0U) {
			ERROR("%s: error during SE operation! 0x%x\n",
					__func__, val);
			ret = -ENOTSUP;
		}
	}

	return ret;
}

/*
 * Wait for SE engine to be idle and clear pending interrupts before
 * starting the next SE operation.
 */
static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t timeout;

	/* Disable SE interrupts to prevent the SE operation from raising one */
	tegra_se_write_32(se_dev, SE_INT_ENABLE_REG_OFFSET, 0U);

	/* Wait for previous operation to finish */
	val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: SE status is not idle!\n", __func__);
		ret = -ETIMEDOUT;
	}

	/* Clear any pending interrupts from previous operation */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
	return ret;
}

/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to automatically perform the context save operation.
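 * Completion is verified by re-reading SE_CTX_SAVE_AUTO and comparing its
 * block count against se_dev->ctx_size_blks, so a partial context write is
 * reported as an error.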
 */
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t blk_count_limit = 0;
	uint32_t block_count;

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);

	/* Read the context save progress counter: block_count
	 * Ensure no previous context save has been triggered
	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
	 */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
		if (block_count != 0U) {
			ERROR("%s: ctx_save triggered multiple times\n",
					__func__);
			ret = -EALREADY;
		}
	}

	/* Set the destination block count for when the context save completes */
	if (ret == 0) {
		blk_count_limit = block_count + se_dev->ctx_size_blks;
	}

	/* Program the SE_CONFIG register as for an RNG operation
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_MODE is ignored
	 * SE_CONFIG.DEC_MODE is ignored
	 * SE_CONFIG.DST = MEMORY
	 */
	if (ret == 0) {
		val = (SE_CONFIG_ENC_ALG_RNG |
			SE_CONFIG_DEC_ALG_NOP |
			SE_CONFIG_DST_MEMORY);
		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

		tegra_se_make_data_coherent(se_dev);

		/* SE_CTX_SAVE operation */
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
				SE_OP_CTX_SAVE);

		ret = tegra_se_operation_complete(se_dev);
	}

	/* Check that the context save wrote the expected number of blocks */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
			ERROR("%s: expected %d blocks but %d were written\n",
					__func__, blk_count_limit, val);
			ret = -ECANCELED;
		}
	}

	return ret;
}

/*
 * Security engine primitive operations, including normal operation
 * and the context save operation.
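 * nbytes is expected to be a multiple of TEGRA_SE_AES_BLOCK_SIZE; the block
 * count register is programmed with (nblocks - 1), and a zero-byte request
 * (e.g. SRK generation) leaves the previously programmed block count in
 * place.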
 */
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
					bool context_save)
{
	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
	int ret = 0;

	assert(se_dev);

	/* Use device buffers for in and out */
	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);
	if (ret != 0) {
		goto op_error;
	}

	/* Program SE operation size */
	if (nblocks) {
		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
	}

	/* Make SE LL data coherent before the SE operation */
	tegra_se_make_data_coherent(se_dev);

	/* Start hardware operation */
	if (context_save)
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
	else
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);

	/* Wait for operation to finish */
	ret = tegra_se_operation_complete(se_dev);

op_error:
	return ret;
}

/*
 * Normal security engine operations other than the context save
 */
int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	return tegra_se_perform_operation(se_dev, nbytes, false);
}

/*
 * Security engine context save operation
 */
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	return tegra_se_perform_operation(se_dev, nbytes, true);
}

/*
 * Security Engine sequence to generate the SRK.
 * SE and SE2 will generate different SRKs from different
 * entropy seeds.
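 * The RNG output is directed to the SRK register (SE_CONFIG.DST = SRK), so
 * no output buffer is needed and a zero-byte operation is sufficient.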
 */
static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DST = SRK
	 * SE_OPERATION.OP = START
	 * SE_CRYPTO_LAST_BLOCK = 0
	 */
	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;

	/* Configure random number generator */
	if (ecid_valid)
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
	else
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* Configure output destination = SRK */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_DST_SRK);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Perform hardware operation */
	ret = tegra_se_start_normal_operation(se_dev, 0);

	return ret;
}

/*
 * Generate plain text random data to some memory location using
 * SE/SE2's SP800-90 random number generator. The random data size
 * must be some multiple of the AES block size (16 bytes).
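 * In this driver the destination is the rand_data field of the context save
 * buffer (SE_CTX_SAVE_RANDOM_DATA_SIZE bytes), which is later encrypted with
 * the SRK before entering SC7.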
 */
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
	int ret = 0;
	uint32_t val;

	/* Set some arbitrary memory location to store the random data */
	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->rand_data)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.ENC_MODE = KEY192
	 * SE_CONFIG.DST = MEMORY
	 */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_ENC_MODE_KEY192 |
		SE_CONFIG_DST_MEMORY);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
	 * XOR_POS = BYPASS
	 * INPUT_SEL = RANDOM (Entropy or LFSR)
	 * HASH_ENB = DISABLE
	 */
	val = (SE_CRYPTO_INPUT_RANDOM |
		SE_CRYPTO_XOR_BYPASS |
		SE_CRYPTO_CORE_ENCRYPT |
		SE_CRYPTO_HASH_DISABLE |
		SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
		SE_CRYPTO_IV_ORIGINAL);
	tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);

	/* Configure RNG */
	if (ecid_valid)
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
	else
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* SE normal operation */
	ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);

	return ret;
}

/*
 * Encrypt memory blocks with SRK as part of the security engine context.
 * The data blocks include: random data and the known pattern data, where
 * the random data is the first block and the known pattern is the last block.
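 * src_addr and dst_addr may point to the same buffer: the caller encrypts the
 * random-data block in place, and copies the known pattern into the context
 * buffer by passing distinct addresses.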
 */
static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
		uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
{
	int ret = 0;

	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->src_ll_buf->buffer[0].addr = src_addr;
	se_dev->src_ll_buf->buffer[0].data_len = data_size;
	se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
	se_dev->dst_ll_buf->buffer[0].data_len = data_size;

	/* By setting the context source from memory and calling the context save
	 * operation, the SE encrypts the memory data with SRK.
	 */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);

	ret = tegra_se_start_ctx_save_operation(se_dev, data_size);

	return ret;
}

/*
 * Context save the key table access control sticky bits and
 * security status of each key-slot. The encrypted sticky-bits are
 * 32 bytes (2 AES blocks) and formatted as the following structure:
 * {	bit in registers			bit in context save
 *	SECURITY_0[4]				158
 *	SE_RSA_KEYTABLE_ACCESS_1[2:0]		157:155
 *	SE_RSA_KEYTABLE_ACCESS_0[2:0]		154:152
 *	SE_RSA_SECURITY_PERKEY_0[1:0]		151:150
 *	SE_CRYPTO_KEYTABLE_ACCESS_15[7:0]	149:142
 *	...,
 *	SE_CRYPTO_KEYTABLE_ACCESS_0[7:0]	29:22
 *	SE_CRYPTO_SECURITY_PERKEY_0[15:0]	21:6
 *	SE_TZRAM_SECURITY_0[1:0]		5:4
 *	SE_SECURITY_0[16]			3:3
 *	SE_SECURITY_0[2:0] }			2:0
 */
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;

	/*
	 * The 1st AES block saves the sticky-bits context 1 - 16 bytes (0 - 3 words).
	 * The 2nd AES block saves the sticky-bits context 17 - 32 bytes (4 - 7 words).
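	 * Each loop iteration below saves one word quad and then advances the
	 * destination address by SE_CTX_SAVE_STICKY_BITS_SIZE for the next one.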
	 */
	for (int i = 0; i < 2; i++) {
		val = SE_CTX_SAVE_SRC_STICKY_BITS |
			SE_CTX_SAVE_STICKY_WORD_QUAD(i);
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev,
				SE_CTX_SAVE_STICKY_BITS_SIZE);
		if (ret)
			break;
		se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
	}

	return ret;
}

static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		ret = -EINVAL;
		goto aes_keytable_save_err;
	}

	/* AES key context save */
	for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].key)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
		for (int i = 0; i < 2; i++) {
			val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
				SE_CTX_SAVE_KEY_INDEX(slot) |
				SE_CTX_SAVE_WORD_QUAD(i);
			tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
						"slot=%d, word_quad=%d.\n",
						__func__, slot, i);
				goto aes_keytable_save_err;
			}
			se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
		}

		/* OIV context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].oiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}

		/* UIV context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].uiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_UPD_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}
	}

aes_keytable_save_err:
	return ret;
}

static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;
	/* For T210, first the modulus and then the exponent must be
	 * encrypted and saved. This is repeated for SLOT 0
	 * and SLOT 1. Hence the order:
	 *	SLOT 0 modulus  : RSA_KEY_INDEX : 1
	 *	SLOT 0 exponent : RSA_KEY_INDEX : 0
	 *	SLOT 1 modulus  : RSA_KEY_INDEX : 3
	 *	SLOT 1 exponent : RSA_KEY_INDEX : 2
	 */
	const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
		/* RSA key slot 0 */
		{SE_RSA_KEY_INDEX_SLOT0_MOD, SE_RSA_KEY_INDEX_SLOT0_EXP},
		/* RSA key slot 1 */
		{SE_RSA_KEY_INDEX_SLOT1_MOD, SE_RSA_KEY_INDEX_SLOT1_EXP},
	};

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se_context_t *)se_dev->
					 ctx_save_buf)->rsa_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
		/* loop for modulus and exponent */
		for (int index = 0; index < 2; index++) {
			for (int word_quad = 0; word_quad < 16; word_quad++) {
				val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
					SE_CTX_SAVE_RSA_KEY_INDEX(
						key_index_mod[slot][index]) |
					SE_CTX_RSA_WORD_QUAD(word_quad);
				tegra_se_write_32(se_dev,
						SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

				/* SE context save operation */
				ret = tegra_se_start_ctx_save_operation(se_dev,
						TEGRA_SE_KEY_128_SIZE);
				if (ret) {
					ERROR("%s: ERR: slot=%d.\n",
						__func__, slot);
					goto rsa_keytable_save_err;
				}

				/* Update the pointer to the next word quad */
				se_dev->dst_ll_buf->buffer[0].addr +=
					TEGRA_SE_KEY_128_SIZE;
			}
		}
	}

rsa_keytable_save_err:
	return ret;
}

static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
{
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					 ctx_save_buf)->pka_ctx.sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;

	/* PKA1 sticky bits are 1 AES block (16 bytes) */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
			SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);

	/* SE context save operation */
	ret = tegra_se_start_ctx_save_operation(se_dev, 0);
	if (ret) {
		ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
				__func__);
		goto pka_sticky_bits_save_err;
	}

pka_sticky_bits_save_err:
	return ret;
}

static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					 ctx_save_buf)->pka_ctx.pka_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	/* for each slot, save word quad 0-127 */
	for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
		for (int word_quad = 0; word_quad < 512/4; word_quad++) {
			val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
				SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
						word_quad) |
				SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
						word_quad);
			tegra_se_write_32(se_dev,
					SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: pka1 keytable ctx save error\n",
						__func__);
				goto pka_keytable_save_err;
			}

			/* Update the pointer to the next word quad */
			se_dev->dst_ll_buf->buffer[0].addr +=
				TEGRA_SE_KEY_128_SIZE;
		}
	}

pka_keytable_save_err:
	return ret;
}

static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
{
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_SRK);

	/* SE context save operation */
	return tegra_se_start_ctx_save_operation(se_dev, 0);
}

/*
 * Lock both SE from non-TZ clients.
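 * Setting the soft TZ lock bit in the SE_SECURITY register restricts the
 * engine to TZ (secure) clients; it is applied before the software context
 * save below.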
 */
static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);
	val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
	val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
	tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
}

/*
 * Use SRK to encrypt SE state and save to TZRAM carveout
 */
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
	int err = 0;

	assert(se_dev);

	/* Lock entire SE/SE2 as TZ protected */
	tegra_se_lock(se_dev);

	INFO("%s: generate SRK\n", __func__);
	/* Generate SRK */
	err = tegra_se_generate_srk(se_dev);
	if (err) {
		ERROR("%s: ERR: SRK generation failed\n", __func__);
		return err;
	}

	INFO("%s: generate random data\n", __func__);
	/* Generate random data */
	err = tegra_se_lp_generate_random_data(se_dev);
	if (err) {
		ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
		return err;
	}

	INFO("%s: encrypt random data\n", __func__);
	/* Encrypt the random data block */
	err = tegra_se_lp_data_context_save(se_dev,
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
		SE_CTX_SAVE_RANDOM_DATA_SIZE);
	if (err) {
		ERROR("%s: ERR: random pattern encryption failed\n", __func__);
		return err;
	}

	INFO("%s: save SE sticky bits\n", __func__);
	/* Save AES sticky bits context */
	err = tegra_se_lp_sticky_bits_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: sticky bits context save failed\n", __func__);
		return err;
	}

	INFO("%s: save AES keytables\n", __func__);
	/* Save AES key table context */
	err = tegra_se_aeskeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: LP keytable save failed\n", __func__);
		return err;
	}

	/* RSA key slot table context save */
	INFO("%s: save RSA keytables\n", __func__);
	err = tegra_se_lp_rsakeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: rsa key table context save failed\n", __func__);
		return err;
	}

	/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
	 * via SE2.
	 */
	if (se_dev->se_num == 2) {
		/* Encrypt PKA1 sticky bits on SE2 only */
		INFO("%s: save PKA sticky bits\n", __func__);
		err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
			return err;
		}

		/* Encrypt PKA1 keyslots on SE2 only */
		INFO("%s: save PKA keytables\n", __func__);
		err = tegra_se_pkakeytable_context_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA key table context save failed\n", __func__);
			return err;
		}
	}

	/* Encrypt known pattern */
	if (se_dev->se_num == 1) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	} else if (se_dev->se_num == 2) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	}
	if (err) {
		ERROR("%s: ERR: save LP known pattern failure\n", __func__);
		return err;
	}

	/* Write lp context buffer address into PMC scratch register */
	if (se_dev->se_num == 1) {
		/* SE context address, support T210 only */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SCRATCH43_REG_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	} else if (se_dev->se_num == 2) {
		/* SE2 & PKA1 context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	}

	/* Save the SRK to the PMC secure scratch registers for the BootROM,
	 * which verifies and restores the security engine context on warm boot.
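	 * The save uses SE_CTX_SAVE_SRC_SRK with a zero-byte operation, so the
	 * SRK value itself is transferred by the hardware rather than read by
	 * software.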
	 */
	err = tegra_se_save_SRK(se_dev);
	if (err < 0) {
		ERROR("%s: ERR: LP SRK save failure\n", __func__);
		return err;
	}

	INFO("%s: SE context save done\n", __func__);

	return err;
}

/*
 * Initialize the SE engine handle
 */
void tegra_se_init(void)
{
	uint32_t val = 0;
	INFO("%s: start SE init\n", __func__);

	/* Generate random SRK to initialize DRBG */
	tegra_se_generate_srk(&se_dev_1);

	if (tegra_chipid_is_t210_b01()) {
		tegra_se_generate_srk(&se_dev_2);
	}

	/* determine if ECID is valid */
	val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
	ecid_valid = (val == ECID_VALID);

	INFO("%s: SE init done\n", __func__);
}

static void tegra_se_enable_clocks(void)
{
	uint32_t val = 0;

	/* Enable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val |= ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* De-Assert Entropy Reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
	val &= ~ENTROPY_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);

	/*
	 * Switch SE clock source to CLK_M, to make sure SE clock
	 * is on when saving SE context
	 */
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_RST_CTL_CLK_SRC_SE,
			SE_CLK_SRC_CLK_M);

	/* Enable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val |= SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);

	/* De-Assert SE Reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
	val &= ~SE_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
}

static void tegra_se_disable_clocks(void)
{
	uint32_t val = 0;

	/* Disable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val &= ~ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* Disable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val &= ~SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
}

/*
 * Security engine power suspend entry point.
 * This function is invoked from PSCI power domain suspend handler.
 */
int32_t tegra_se_suspend(void)
{
	int32_t ret = 0;
	uint32_t val = 0;

	/* SE does not use the SMMU in EL3, so disable it here.
	 * The kernel will re-enable it on resume. */
	val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
	val &= ~PPCS_SMMU_ENABLE;
	mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);

	tegra_se_enable_clocks();

	if (tegra_chipid_is_t210_b01()) {
		/* T210 B01: atomic context save for SE2 and PKA1 */
		INFO("%s: SE2/PKA1 atomic context save\n", __func__);
		ret = tegra_se_context_save_atomic(&se_dev_2);
		if (ret != 0) {
			ERROR("%s: SE2 ctx save failed (%d)\n", __func__, ret);
		}

		ret = tegra_se_context_save_atomic(&se_dev_1);
		if (ret != 0) {
			ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
		}
	} else {
		/* T210: software (legacy) context save for SE */
		INFO("%s: SE1 legacy(SW) context save\n", __func__);
		ret = tegra_se_context_save_sw(&se_dev_1);
		if (ret != 0) {
			ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
		}
	}

	tegra_se_disable_clocks();

	return ret;
}

/*
 * Save TZRAM to shadow TZRAM in AON
 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);
	tegra_se_enable_clocks();

	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	tegra_se_disable_clocks();

	return ret;
}

/*
 * This function is invoked on SE resume
 */
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);

	/* Lock RNG source to ENTROPY on resume */
	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
		DRBG_RO_ENT_SRC_LOCK_ENABLE |
		DRBG_RO_ENT_SRC_ENABLE;
	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);

	/* Set a random value to SRK to initialize DRBG */
	tegra_se_generate_srk(se_dev);
}

/*
 * This function is invoked on SC7 resume
 */
void tegra_se_resume(void)
{
	tegra_se_warm_boot_resume(&se_dev_1);

	if (tegra_chipid_is_t210_b01()) {
		tegra_se_warm_boot_resume(&se_dev_2);
	}
}