1 /*
2 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 * Description: KV Storage Library flash updating module implementation
15 */
16
17 #include "nv_update.h"
18 #include "nv_store.h"
19 #include "nv_page.h"
20 #include "nv_key.h"
21 #include "nv_porting.h"
22 #include "nv_config.h"
23 #include "nv_reset.h"
24 #include "nv_nvregion.h"
25 #if defined(CONFIG_PARTITION_FEATURE_SUPPORT)
26 #include "partition.h"
27 #endif
28 #include "assert.h"
29 #include "common_def.h"
30 #include "uapi_crc.h"
31 #ifndef CONFIG_NV_SUPPORT_SINGLE_CORE_SYSTEM
32 #include "flash_task_mutex.h"
33 #include "flash_task_adapt.h"
34 #endif
35
36 #define KV_WRITE_KEY_BUFFER_SIZE NV_KEY_DATA_CHUNK_LEN
37 #define KV_WRITE_CHUNK_SIZE 32
38
39 #define assert__(x) ((void)0)
40
41 /* Pointer to current state machine being processed.
42 * Will point to the last instance in a list of kv_active_state_machine_t, or NULL if
43 * no state machine is being processed */
44 STATIC kv_active_state_machine_t *g_current_state_machine = NULL;
45
46 /* Holds a search filter being used by searching operations performed by state machines */
47 STATIC kv_key_filter_t g_search_filter;
48
49 /* Holds details of key data to be written to a store */
50 STATIC kv_key_details_t g_new_key_details;
51
/* The key within a KV store being accessed by a state machine */
53 STATIC kv_key_handle_t g_current_key;
54
/* The flash page within a KV store being accessed by a state machine */
56 STATIC kv_page_handle_t g_current_page;
57
/* The KV store being accessed by a state machine */
59 STATIC kv_store_t g_current_store;
60
61 /* The position in a flash page where a new key will be written */
62 STATIC kv_key_location g_current_store_write_location;
63
64 /* Memory address of flash page being used to defrag an existing page into */
65 STATIC kv_page_location g_defrag_page_location = NULL;
66
67
68 STATIC errcode_t kv_create_write_buffer(uint32_t write_location, uint32_t size);
69 STATIC void kv_remove_write_buffer(void);
70 STATIC kv_managed_source_buffer_t* kv_update_source_buffer(uint8_t *data, uint32_t data_length);
71 STATIC void kv_remove_source_buffer(void);
72 STATIC void kv_release_crypto(void);
73 STATIC errcode_t begin_state_machine(const kv_state_machine_t *machine);
74 STATIC void end_state_machine(void);
75
76 /* State machine action functions */
77 STATIC kv_update_event_t kv_update_action_store_write_buffer(void);
78 STATIC kv_update_event_t kv_update_action_prime_write_buffer(void);
79
80 STATIC kv_update_event_t kv_update_action_select_first_page(void);
81 STATIC kv_update_event_t kv_update_action_select_next_page(void);
82 STATIC kv_update_event_t kv_update_action_find_first_key(void);
83 STATIC kv_update_event_t kv_update_action_find_next_key(void);
84 STATIC kv_update_event_t kv_update_action_prepare_delete_key(void);
85
86 STATIC kv_update_event_t kv_update_action_find_defrag_page(void);
87 STATIC kv_update_event_t kv_update_action_erase_defrag_page(void);
88 STATIC kv_update_event_t kv_update_action_prepare_copy_key(void);
89
90 STATIC kv_update_event_t kv_update_action_prepare_defrag_page(void);
91 STATIC kv_update_event_t kv_update_action_copy_all_keys(void);
92 STATIC kv_update_event_t kv_update_action_prepare_page_header(void);
93 STATIC kv_update_event_t kv_update_action_find_write_position(void);
94 STATIC kv_update_event_t kv_update_action_defrag_current_page(void);
95 STATIC kv_update_event_t kv_update_action_update_nvregion_map(void);
96
97 STATIC kv_update_event_t kv_update_action_prepare_store(void);
98 STATIC kv_update_event_t kv_update_action_prepare_write_key(void);
99 STATIC kv_update_event_t kv_update_action_claim_crypto(void);
100 STATIC kv_update_event_t kv_update_action_erase_old_keys(void);
101
102 STATIC kv_update_event_t kv_update_action_update_nvmap_for_erase_key(void);
103 STATIC kv_update_event_t kv_update_action_update_nvmap_for_new_key(void);
104
105
/* State machine to iterate through keys in a store, marking all selected keys as erased
 * - Uses a pre-configured filter (g_search_filter) to determine which keys to select for erasing
 * - Pages are visited in order; each matching key is invalidated by a small write,
 *   and the page map is updated before the next key is located
 */
static const kv_update_transition_t g_erase_keys_transitions[] = {
    {.state = STATE_SELECT_FIRST_PAGE, .event = EVENT_PAGE_SELECTED, .next_state = STATE_FIND_FIRST_KEY },
    {.state = STATE_SELECT_FIRST_PAGE, .event = EVENT_PAGE_NOT_SELECTED, .next_state = STATE_EXIT },
    {.state = STATE_SELECT_NEXT_PAGE, .event = EVENT_PAGE_SELECTED, .next_state = STATE_FIND_FIRST_KEY },
    {.state = STATE_SELECT_NEXT_PAGE, .event = EVENT_PAGE_NOT_SELECTED, .next_state = STATE_EXIT },
    {.state = STATE_FIND_FIRST_KEY, .event = EVENT_KEY_FOUND, .next_state = STATE_PREP_DELETE_KEY },
    {.state = STATE_FIND_FIRST_KEY, .event = EVENT_KEY_NOT_FOUND, .next_state = STATE_SELECT_NEXT_PAGE},
    {.state = STATE_FIND_NEXT_KEY, .event = EVENT_KEY_FOUND, .next_state = STATE_PREP_DELETE_KEY },
    {.state = STATE_FIND_NEXT_KEY, .event = EVENT_KEY_NOT_FOUND, .next_state = STATE_SELECT_NEXT_PAGE},
    {.state = STATE_PREP_DELETE_KEY, .event = EVENT_WRITE_BUFFER_PRIMED, .next_state = STATE_PERFORM_WRITE },
    {.state = STATE_PERFORM_WRITE, .event = EVENT_WRITE_BUFFER_STORED, .next_state = STATE_UPDATE_MAP_FOR_OLD },
    {.state = STATE_UPDATE_MAP_FOR_OLD, .event = EVENT_PAGE_MAP_UPDATED, .next_state = STATE_FIND_NEXT_KEY },
    {.state = STATE_PERFORM_WRITE, .event = EVENT_SUSPEND, .next_state = STATE_SUSPENDED },
    {.state = STATE_EXIT, .event = EVENT_NONE, .next_state = STATE_EXIT }
};
/* Maps each state of the erase-keys machine to the action function executed on entry */
static const kv_update_action_t g_erase_keys_actions[] = {
    {.state = STATE_SELECT_FIRST_PAGE, .action = kv_update_action_select_first_page },
    {.state = STATE_SELECT_NEXT_PAGE, .action = kv_update_action_select_next_page },
    {.state = STATE_FIND_FIRST_KEY, .action = kv_update_action_find_first_key },
    {.state = STATE_FIND_NEXT_KEY, .action = kv_update_action_find_next_key },
    {.state = STATE_PREP_DELETE_KEY, .action = kv_update_action_prepare_delete_key},
    {.state = STATE_PERFORM_WRITE, .action = kv_update_action_store_write_buffer},
    {.state = STATE_UPDATE_MAP_FOR_OLD, .action = kv_update_action_update_nvmap_for_erase_key},
    {.state = STATE_EXIT, .action = NULL }
};
/* Erase-keys machine: emits EVENT_KEYS_ERASED on exit; the write buffer only ever
 * holds the 4-byte invalidation marker, hence sizeof(uint32_t) */
static const kv_state_machine_t g_erase_keys_machine = {
    .initial_state = STATE_SELECT_FIRST_PAGE,
    .resume_state = STATE_PERFORM_WRITE,
    .exit_event = EVENT_KEYS_ERASED,
    .transition_table = g_erase_keys_transitions,
    .action_table = g_erase_keys_actions,
    .write_buffer_size = sizeof(uint32_t)
};
142
/*
 * State machine to locate a defrag page
 * - Scans the NV Region for an unused page of flash
 * - Sets g_defrag_page_location to point to the unused flash page, thus declaring it to be the current defrag page
 * - Ensures the defrag page is erased (the erase can be suspended and resumed)
 */
static const kv_update_transition_t g_prep_defrag_page_transitions[] = {
    {.state = STATE_FIND_DEFRAG, .event = EVENT_DEFRAG_FOUND, .next_state = STATE_ERASE_DEFRAG},
    {.state = STATE_ERASE_DEFRAG, .event = EVENT_DEFRAG_ERASED, .next_state = STATE_EXIT },
    {.state = STATE_ERASE_DEFRAG, .event = EVENT_SUSPEND, .next_state = STATE_SUSPENDED },
    {.state = STATE_EXIT, .event = EVENT_NONE, .next_state = STATE_EXIT }
};
/* Maps each state of the prep-defrag machine to its action function */
static const kv_update_action_t g_prep_defrag_page_actions[] = {
    {.state = STATE_FIND_DEFRAG, .action = kv_update_action_find_defrag_page },
    {.state = STATE_ERASE_DEFRAG, .action = kv_update_action_erase_defrag_page},
    {.state = STATE_EXIT, .action = NULL }
};
/* Prep-defrag machine: emits EVENT_DEFRAG_PREPARED on exit; performs no buffered writes */
static const kv_state_machine_t g_prep_defrag_page_machine = {
    .initial_state = STATE_FIND_DEFRAG,
    .resume_state = STATE_ERASE_DEFRAG,
    .exit_event = EVENT_DEFRAG_PREPARED,
    .transition_table = g_prep_defrag_page_transitions,
    .action_table = g_prep_defrag_page_actions,
    .write_buffer_size = 0
};
168
/*
 * State machine to copy keys from the current page to the defrag page.
 * Keys to copy are selected by a pre-configured filter, contained in g_search_filter
 * Current page identified by g_current_page
 * Defrag page identified by g_defrag_page_location
 * Each key is streamed through the write buffer in chunks (prime -> write -> prime ...)
 * until the key's data is exhausted, then the next matching key is located.
 */
static const kv_update_transition_t g_copy_all_keys_transitions[] = {
    {.state = STATE_FIND_FIRST_KEY, .event = EVENT_KEY_FOUND, .next_state = STATE_PREP_COPY_KEY },
    {.state = STATE_FIND_FIRST_KEY, .event = EVENT_KEY_NOT_FOUND, .next_state = STATE_EXIT },
    {.state = STATE_FIND_NEXT_KEY, .event = EVENT_KEY_FOUND, .next_state = STATE_PREP_COPY_KEY },
    {.state = STATE_FIND_NEXT_KEY, .event = EVENT_KEY_NOT_FOUND, .next_state = STATE_EXIT },
    {.state = STATE_PREP_COPY_KEY, .event = EVENT_COPY_KEY_READY, .next_state = STATE_PRIME_WRITE },
    {.state = STATE_PRIME_WRITE, .event = EVENT_WRITE_BUFFER_PRIMED, .next_state = STATE_PERFORM_WRITE },
    {.state = STATE_PRIME_WRITE, .event = EVENT_WRITE_DATA_EXHAUSTED, .next_state = STATE_FIND_NEXT_KEY },
    {.state = STATE_PERFORM_WRITE, .event = EVENT_WRITE_BUFFER_STORED, .next_state = STATE_PRIME_WRITE },
    {.state = STATE_PERFORM_WRITE, .event = EVENT_SUSPEND, .next_state = STATE_SUSPENDED },
    {.state = STATE_EXIT, .event = EVENT_NONE, .next_state = STATE_EXIT }
};
/* Maps each state of the copy-all-keys machine to its action function */
static const kv_update_action_t g_copy_all_keys_actions[] = {
    {.state = STATE_FIND_FIRST_KEY, .action = kv_update_action_find_first_key },
    {.state = STATE_FIND_NEXT_KEY, .action = kv_update_action_find_next_key },
    {.state = STATE_PREP_COPY_KEY, .action = kv_update_action_prepare_copy_key },
    {.state = STATE_PRIME_WRITE, .action = kv_update_action_prime_write_buffer},
    {.state = STATE_PERFORM_WRITE, .action = kv_update_action_store_write_buffer},
    {.state = STATE_EXIT, .action = NULL }
};
/* Copy-all-keys machine: emits EVENT_ALL_KEYS_COPIED on exit */
static const kv_state_machine_t g_copy_all_keys_machine = {
    .initial_state = STATE_FIND_FIRST_KEY,
    .resume_state = STATE_PERFORM_WRITE,
    .exit_event = EVENT_ALL_KEYS_COPIED,
    .transition_table = g_copy_all_keys_transitions,
    .action_table = g_copy_all_keys_actions,
    .write_buffer_size = KV_WRITE_KEY_BUFFER_SIZE
};
203
/*
 * State machine to defrag a store page
 * Makes use of the g_prep_defrag_page_machine state machine
 * Makes use of the g_copy_all_keys_machine state machine
 * - Locates an unused flash page in the NV Region and erases it
 * - Copies valid keys from the page being defragged to the new page
 * - Writes an updated page header to the top of the new page, so that it supersedes the recently defragged page
 */
static const kv_update_transition_t g_defrag_page_transitions[] = {
    {.state = STATE_PREP_DEFRAG, .event = EVENT_DEFRAG_PREPARED, .next_state = STATE_COPY_ALL_KEYS },
    {.state = STATE_COPY_ALL_KEYS, .event = EVENT_ALL_KEYS_COPIED, .next_state = STATE_PREP_PAGE_HEADER },
    {.state = STATE_PREP_PAGE_HEADER, .event = EVENT_WRITE_BUFFER_PRIMED, .next_state = STATE_WRITE_PAGE_HEADER },
    {.state = STATE_WRITE_PAGE_HEADER, .event = EVENT_WRITE_BUFFER_STORED, .next_state = STATE_UPDATE_NVREGION_MAP},
    {.state = STATE_WRITE_PAGE_HEADER, .event = EVENT_SUSPEND, .next_state = STATE_SUSPENDED },
    {.state = STATE_UPDATE_NVREGION_MAP, .event = EVENT_NVREGION_MAP_UPDATED, .next_state = STATE_EXIT },
    {.state = STATE_EXIT, .event = EVENT_NONE, .next_state = STATE_EXIT }
};
/* Maps each state of the defrag-page machine to its action function */
static const kv_update_action_t g_defrag_page_actions[] = {
    {.state = STATE_PREP_DEFRAG, .action = kv_update_action_prepare_defrag_page},
    {.state = STATE_COPY_ALL_KEYS, .action = kv_update_action_copy_all_keys },
    {.state = STATE_PREP_PAGE_HEADER, .action = kv_update_action_prepare_page_header},
    {.state = STATE_WRITE_PAGE_HEADER, .action = kv_update_action_store_write_buffer },
    {.state = STATE_UPDATE_NVREGION_MAP, .action = kv_update_action_update_nvregion_map},
    {.state = STATE_EXIT, .action = NULL }
};
/* Defrag-page machine: emits EVENT_DEFRAG_COMPLETE on exit; write buffer sized to hold
 * the new page header */
static const kv_state_machine_t g_defrag_page_machine = {
    .initial_state = STATE_PREP_DEFRAG,
    .resume_state = STATE_WRITE_PAGE_HEADER,
    .exit_event = EVENT_DEFRAG_COMPLETE,
    .transition_table = g_defrag_page_transitions,
    .action_table = g_defrag_page_actions,
    .write_buffer_size = sizeof(kv_page_header_t)
};
237
/*
 * State machine to prepare a store for writing a key
 * May make use of the g_defrag_page_machine state machine
 * - Selects a suitable page in a store for receiving a key
 * - Kicks off a defrag of the selected page if necessary, then retries the position search
 */
static const kv_update_transition_t g_prepare_store_transitions[] = {
    {.state = STATE_FIND_WRITE_POS, .event = EVENT_DEFRAG_REQUIRED, .next_state = STATE_DEFRAG_PAGE },
    {.state = STATE_FIND_WRITE_POS, .event = EVENT_WRITE_POS_FOUND, .next_state = STATE_EXIT },
    {.state = STATE_DEFRAG_PAGE, .event = EVENT_DEFRAG_COMPLETE, .next_state = STATE_FIND_WRITE_POS},
    {.state = STATE_EXIT, .event = EVENT_NONE, .next_state = STATE_EXIT }
};
/* Maps each state of the prepare-store machine to its action function */
static const kv_update_action_t g_prepare_store_actions[] = {
    {.state = STATE_FIND_WRITE_POS, .action = kv_update_action_find_write_position},
    {.state = STATE_DEFRAG_PAGE, .action = kv_update_action_defrag_current_page},
    {.state = STATE_EXIT, .action = NULL }
};
/* Prepare-store machine: emits EVENT_STORE_READY on exit; has no resumable state
 * (resume_state = STATE_INVALID) and performs no buffered writes itself */
STATIC const kv_state_machine_t g_prepare_store_machine = {
    .initial_state = STATE_FIND_WRITE_POS,
    .resume_state = STATE_INVALID,
    .exit_event = EVENT_STORE_READY,
    .transition_table = g_prepare_store_transitions,
    .action_table = g_prepare_store_actions,
    .write_buffer_size = 0
};
263
/*
 * State machine to write a key to a store
 * Makes use of the g_prepare_store_machine state machine
 * - Prepares the store, then streams the key data through the write buffer
 *   (claiming crypto resources first), updates the page map for the new key,
 *   and finally erases any old copies of the key.
 */
static const kv_update_transition_t g_write_key_transitions[] = {
    {.state = STATE_PREP_STORE, .event = EVENT_STORE_READY, .next_state = STATE_PREP_KEY_DATA },
    {.state = STATE_PREP_KEY_DATA, .event = EVENT_KEY_UPDATE_NOT_NEEDED, .next_state = STATE_EXIT },
    {.state = STATE_PREP_KEY_DATA, .event = EVENT_KEY_DATA_READY, .next_state = STATE_CLAIM_CRYPTO },
    {.state = STATE_CLAIM_CRYPTO, .event = EVENT_CRYPTO_CLAIMED, .next_state = STATE_PRIME_WRITE },
    {.state = STATE_PRIME_WRITE, .event = EVENT_WRITE_BUFFER_PRIMED, .next_state = STATE_PERFORM_WRITE },
    {.state = STATE_PRIME_WRITE, .event = EVENT_WRITE_DATA_EXHAUSTED, .next_state = STATE_UPDATE_MAP_FOR_NEW},
    {.state = STATE_UPDATE_MAP_FOR_NEW, .event = EVENT_PAGE_MAP_UPDATED, .next_state = STATE_ERASE_OLD_KEYS },
    {.state = STATE_PERFORM_WRITE, .event = EVENT_WRITE_BUFFER_STORED, .next_state = STATE_PRIME_WRITE },
    {.state = STATE_PERFORM_WRITE, .event = EVENT_SUSPEND, .next_state = STATE_SUSPENDED },
    {.state = STATE_ERASE_OLD_KEYS, .event = EVENT_KEYS_ERASED, .next_state = STATE_EXIT },
    {.state = STATE_EXIT, .event = EVENT_NONE, .next_state = STATE_EXIT }
};
/* Maps each state of the write-key machine to its action function */
static const kv_update_action_t g_write_key_actions[] = {
    {.state = STATE_PREP_STORE, .action = kv_update_action_prepare_store },
    {.state = STATE_PREP_KEY_DATA, .action = kv_update_action_prepare_write_key },
    {.state = STATE_CLAIM_CRYPTO, .action = kv_update_action_claim_crypto },
    {.state = STATE_PRIME_WRITE, .action = kv_update_action_prime_write_buffer},
    {.state = STATE_PERFORM_WRITE, .action = kv_update_action_store_write_buffer},
    {.state = STATE_UPDATE_MAP_FOR_NEW, .action = kv_update_action_update_nvmap_for_new_key},
    {.state = STATE_ERASE_OLD_KEYS, .action = kv_update_action_erase_old_keys },
    {.state = STATE_EXIT, .action = NULL }
};
/* Write-key machine: emits EVENT_WRITE_COMPLETE on exit; resumes at STATE_CLAIM_CRYPTO
 * so crypto resources are re-claimed after a suspension */
static const kv_state_machine_t g_write_key_machine = {
    .initial_state = STATE_PREP_STORE,
    .resume_state = STATE_CLAIM_CRYPTO,
    .exit_event = EVENT_WRITE_COMPLETE,
    .transition_table = g_write_key_transitions,
    .action_table = g_write_key_actions,
    .write_buffer_size = KV_WRITE_KEY_BUFFER_SIZE
};
299
/* Attempts to write all data in a write buffer to flash.
 * Data is written in KV_WRITE_CHUNK_SIZE chunks so that the operation can be suspended
 * part-way: a failed chunk write records the resume position and returns EVENT_SUSPEND;
 * once the whole buffer is on flash, EVENT_WRITE_BUFFER_STORED is returned. */
STATIC kv_update_event_t kv_update_action_store_write_buffer(void)
{
    errcode_t ret;
    uint32_t read_location;
    uint32_t write_length;
    uint32_t written;

    kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;

    /* Skip over the section of the write buffer that has already been written, due to being
     * suspended: advance both the consumed count and the flash write location to the
     * previously recorded resume point (clamped to the buffer's data length) */
    if (write_buffer->write_location < write_buffer->resume_location) {
        uint32_t data_already_written = write_buffer->resume_location - write_buffer->write_location;
        if (data_already_written > write_buffer->data_length) {
            data_already_written = write_buffer->data_length;
        }
        write_buffer->data_consumed += (uint16_t)data_already_written;
        write_buffer->write_location += data_already_written;
    }

    /* Attempt to write chunks of data from the write_buffer to flash */
    while (write_buffer->data_consumed < write_buffer->data_length) {
        /* Determine size and position of chunk to write */
        read_location = (uint32_t)(uintptr_t)write_buffer->data + write_buffer->data_consumed;
        write_length = uapi_min(KV_WRITE_CHUNK_SIZE, write_buffer->data_length - write_buffer->data_consumed);

        /* Attempt to write chunk; a failed write is accounted as zero bytes written */
        ret = kv_key_write_flash(write_buffer->write_location, write_length, (uint8_t *)(uintptr_t)read_location);
        if (ret == ERRCODE_SUCC) {
            written = write_length;
        } else {
            written = 0;
        }
        write_buffer->data_consumed += (uint16_t)written;
        /* Update write position, automatically allowing for subsequent writes to occur sequentially in flash */
        write_buffer->write_location += written;
        if (written < write_length) {
            /* Write was aborted early - remember where to resume, then suspend the state machine */
            write_buffer->resume_location = write_buffer->write_location;
            return EVENT_SUSPEND;
        }
    }
    /* Write completed */
    return EVENT_WRITE_BUFFER_STORED;
}
/*
 * Apply the CRC / hash / encryption steps requested by source_buffer to a chunk that has
 * just been copied into the write buffer.
 * write_buffer  : destination buffer holding the chunk and the crypto bookkeeping state
 * source_buffer : describes which integrity/crypto operations this data requires
 * chunk_dest    : address of the chunk within the write buffer
 * chunk_len     : length in bytes of the (already padded) chunk
 * Returns ERRCODE_SUCC, an encode error from nv_crypto_encode, or an
 * ERRCODE_NV_*_UNAVAILABLE code if a required crypto resource was not claimed.
 */
STATIC errcode_t kv_update_hash_encrypt_chunk(kv_managed_write_buffer_t *write_buffer,
    const kv_managed_source_buffer_t *source_buffer, uint32_t chunk_dest, uint32_t chunk_len)
{
    /* Fold the chunk into the running CRC if requested */
    errcode_t ret = ERRCODE_SUCC;
    if (source_buffer->crc_data) {
        if (!write_buffer->crc_claimed) {
            return ERRCODE_NV_HASH_UNAVAILABLE;
        }
#if (defined(CONFIG_NV_SUPPORT_CRC16_VERIFY) && (CONFIG_NV_SUPPORT_CRC16_VERIFY == NV_YES))
        write_buffer->crc_ret =
            (uint16_t)uapi_crc16((uint16_t)write_buffer->crc_ret, (const uint8_t *)(uintptr_t)chunk_dest, chunk_len);
#else
        write_buffer->crc_ret = uapi_crc32(write_buffer->crc_ret, (const uint8_t *)(uintptr_t)chunk_dest, chunk_len);
#endif
    }

#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
    /* Fold the chunk into the running hash if requested */
    if (source_buffer->hash_data) {
        if (!write_buffer->hash_claimed) {
            return ERRCODE_NV_HASH_UNAVAILABLE;
        }
        nv_crypto_update_hash((const uint8_t *)(uintptr_t)chunk_dest, chunk_len);
    }

    /* Encrypt the chunk in place if requested (source and destination are the same address) */
    if (source_buffer->encrypt_data) {
        if (!write_buffer->encrypt_claimed) {
            return ERRCODE_NV_AES_UNAVAILABLE;
        }

        ret = nv_crypto_encode(write_buffer->crypto_handle, (uintptr_t)chunk_dest, (uintptr_t)chunk_dest, chunk_len);
    }

    /* For GCM-tagged or hashed data, also CRC the chunk - at this point it may have been
     * encrypted in place by the step above */
    if (source_buffer->gcm_tag_data || source_buffer->hash_data) {
        if ((source_buffer->gcm_tag_data && !write_buffer->gcm_tag_claimed) ||
            (source_buffer->hash_data && !write_buffer->hash_claimed)) {
            return ERRCODE_NV_HASH_UNAVAILABLE;
        }
#if (defined(CONFIG_NV_SUPPORT_CRC16_VERIFY) && (CONFIG_NV_SUPPORT_CRC16_VERIFY == NV_YES))
        write_buffer->crc_ret =
            (uint16_t)uapi_crc16((uint16_t)write_buffer->crc_ret, (const uint8_t *)(uintptr_t)chunk_dest, chunk_len);
#else
        write_buffer->crc_ret = uapi_crc32(write_buffer->crc_ret, (const uint8_t *)(uintptr_t)chunk_dest, chunk_len);
#endif
    }
#endif
    return ret;
}
395
/* Copy data from the selected source buffer to the write_buffer, hashing and encrypting the data as necessary.
 * Returns ERRCODE_SUCC when a chunk was consumed, ERRCODE_NV_BUFFER_PRIMED_PREMATURELY when the
 * write buffer does not have room for the (padded) chunk, or a copy/crypto error code. */
STATIC errcode_t prime_next_chunk(void)
{
    kv_managed_source_buffer_t *source_buffer = g_current_state_machine->current_source;
    kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;

    /* Chunk is bounded by both the space left in the write buffer and the data left in the source */
    uint32_t remaining_dest_space = write_buffer->size - write_buffer->data_length;
    uint32_t remaining_src_data = source_buffer->data_length - source_buffer->data_consumed;
    uint16_t chunk_len = (uint16_t)uapi_min(remaining_dest_space, remaining_src_data);

    uint32_t chunk_dest = (uint32_t)(uintptr_t)write_buffer->data + write_buffer->data_length;
    uint32_t chunk_src = (uint32_t)(uintptr_t)source_buffer->data + source_buffer->data_consumed;

#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
    if (source_buffer->encrypt_data) {
        /* For encrypted data, each operation must process the same data length as an NV read
         * (NV_KEY_DATA_CHUNK_LEN); therefore when the remaining space in write_buffer is
         * smaller than NV_KEY_DATA_CHUNK_LEN, defer this chunk until the buffer is flushed. */
        chunk_len = (uint16_t)uapi_min(chunk_len, NV_KEY_DATA_CHUNK_LEN);
        if (remaining_dest_space < NV_KEY_DATA_CHUNK_LEN) {
            return ERRCODE_NV_BUFFER_PRIMED_PREMATURELY;
        }
    }
#endif

    kv_attributes_t attributes = (source_buffer->encrypt_data) ? KV_ATTRIBUTE_ENCRYPTED : 0;
    if (remaining_dest_space < kv_key_padded_data_length(attributes, chunk_len)) {
        /* Not enough space for padded data, exit early and write what we have */
        return ERRCODE_NV_BUFFER_PRIMED_PREMATURELY;
    }

    /* Copy a chunk of source_buffer data to write buffer */
    if ((chunk_src >= FLASH_PHYSICAL_ADDR_START) &&
        (chunk_src + chunk_len) <= FLASH_PHYSICAL_ADDR_END) {
        /* Source buffer is referencing data in Flash */
        errcode_t res = kv_key_helper_copy_flash(chunk_dest, chunk_src, chunk_len);
        if (res != ERRCODE_SUCC) {
            return res;
        }
    } else {
        int32_t result;
        /* Source buffer is referencing data in RAM */
        result = memcpy_s((void *)(uintptr_t)chunk_dest, remaining_dest_space,
            (const void *)(uintptr_t)chunk_src, chunk_len);
        if (result != (int32_t)EOK) {
            /* memcpy_s bounds were pre-checked above, so a failure here should be impossible */
            assert__(false);
        }
    }

    /* Round chunk_len up to a multiple of 4 or 16 bytes, depending upon the encryption requirement */
    /* We have already checked there is enough space in the write buffer */
    chunk_len = kv_key_padded_data_length(attributes, chunk_len);

    errcode_t hash_crypt_ret;
    hash_crypt_ret = kv_update_hash_encrypt_chunk(write_buffer, source_buffer, chunk_dest, chunk_len);
    if (hash_crypt_ret != ERRCODE_SUCC) {
        return hash_crypt_ret;
    }

    /* Account for the padded chunk length in both buffers */
    write_buffer->data_length += chunk_len;
    source_buffer->data_consumed += chunk_len;
    return ERRCODE_SUCC;
}
460
/* Select the next source buffer in the chain, filling it with a CRC / hash / GCM tag if it has
 * been designated to receive the hash calculation.
 * Returns ERRCODE_SUCC, ERRCODE_NV_KEY_HASH_BUFFER_TOO_SMALL if the receiving buffer is too
 * small, or ERRCODE_NV_HASH_UNAVAILABLE if no CRC/hash/tag resource is currently claimed. */
STATIC errcode_t select_next_source_buffer(void)
{
    kv_managed_source_buffer_t *source_buffer = g_current_state_machine->current_source->next;
    kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;

    g_current_state_machine->current_source = source_buffer;

    /* Has the next source_buffer buffer been designated to receive the hash calculation */
    if ((source_buffer != NULL) && (source_buffer->receive_hash)) {
        if (write_buffer->crc_claimed) {
            /* CRC only: place the byte-swapped CRC at the start of the buffer */
            if (source_buffer->data_length < KV_CRYPTO_CRC_SIZE) {
                return ERRCODE_NV_KEY_HASH_BUFFER_TOO_SMALL;
            }

            write_buffer->crc_ret = kv_crc32_swap(write_buffer->crc_ret);
            (void)memcpy_s((void *)source_buffer->data, KV_CRYPTO_CRC_SIZE, (const void *)&write_buffer->crc_ret,
                KV_CRYPTO_CRC_SIZE);
            write_buffer->crc_claimed = false;
            source_buffer->data_length = KV_CRYPTO_CRC_SIZE;
#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
        } else if (write_buffer->hash_claimed) {
            /* Hash: finalise the running hash into the buffer, then place the byte-swapped
             * CRC in the last KV_CRYPTO_CRC_SIZE bytes of the hash area */
            if (source_buffer->data_length < KV_CRYPTO_HASH_SIZE) {
                return ERRCODE_NV_KEY_HASH_BUFFER_TOO_SMALL;
            }
            (void)memset_s(source_buffer->data, source_buffer->data_length, 0, source_buffer->data_length);
            nv_crypto_complete_hash(source_buffer->data);

            write_buffer->crc_ret = kv_crc32_swap(write_buffer->crc_ret);
            (void)memcpy_s((void *)(source_buffer->data + KV_CRYPTO_HASH_SIZE - KV_CRYPTO_CRC_SIZE),
                KV_CRYPTO_CRC_SIZE, (const void *)&write_buffer->crc_ret, KV_CRYPTO_CRC_SIZE);

            write_buffer->hash_claimed = false;
            source_buffer->data_length = KV_CRYPTO_HASH_SIZE;
        } else if (write_buffer->gcm_tag_claimed) {
            /* AES-GCM: fetch the tag into the buffer, then place the byte-swapped CRC in the
             * last KV_CRYPTO_CRC_SIZE bytes, mirroring the hash layout above */
            uint32_t tag_len = NV_AES_GCM_TAG_LENGTH;
            if (source_buffer->data_length < KV_CRYPTO_HASH_SIZE) {
                return ERRCODE_NV_KEY_HASH_BUFFER_TOO_SMALL;
            }

            write_buffer->crc_ret = kv_crc32_swap(write_buffer->crc_ret);
            memset_s(source_buffer->data, source_buffer->data_length, 0, source_buffer->data_length);

            nv_crypto_get_tag(write_buffer->crypto_handle, source_buffer->data, &tag_len);

            (void)memcpy_s((void *)(source_buffer->data + KV_CRYPTO_HASH_SIZE - KV_CRYPTO_CRC_SIZE),
                KV_CRYPTO_CRC_SIZE, (const void *)&write_buffer->crc_ret, KV_CRYPTO_CRC_SIZE);

            write_buffer->gcm_tag_claimed = false;
            source_buffer->data_length = KV_CRYPTO_HASH_SIZE;
#endif
        } else {
            return ERRCODE_NV_HASH_UNAVAILABLE;
        }
    }

    return ERRCODE_SUCC;
}
519
520 /* Gather data from one or more source buffers to fill write_buffer with more data ready for writing to flash */
kv_update_action_prime_write_buffer(void)521 STATIC kv_update_event_t kv_update_action_prime_write_buffer(void)
522 {
523 errcode_t res;
524
525 kv_managed_source_buffer_t *source_buffer = g_current_state_machine->current_source;
526 kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;
527
528 (void)memset_s(write_buffer->data, write_buffer->size, 0, write_buffer->size);
529
530 write_buffer->data_length = 0;
531 write_buffer->data_consumed = 0;
532
533 while ((source_buffer != NULL) && (write_buffer->data_length < write_buffer->size)) {
534 if (source_buffer->data_consumed < source_buffer->data_length) {
535 res = prime_next_chunk();
536 if (res == ERRCODE_NV_BUFFER_PRIMED_PREMATURELY) {
537 /* Not enough space for padded data so just write what we have for now */
538 break;
539 }
540 if (res != ERRCODE_SUCC) {
541 g_current_state_machine->error_code = res;
542 return EVENT_ERROR;
543 }
544 } else {
545 /* Select next source buffer */
546 res = select_next_source_buffer();
547 if (res != ERRCODE_SUCC) {
548 g_current_state_machine->error_code = res;
549 return EVENT_ERROR;
550 }
551 source_buffer = g_current_state_machine->current_source;
552 }
553 }
554
555 /* Did we manage to place any data in the write buffer? */
556 if (write_buffer->data_length == 0) {
557 kv_release_crypto();
558 return EVENT_WRITE_DATA_EXHAUSTED;
559 } else {
560 return EVENT_WRITE_BUFFER_PRIMED;
561 }
562 }
563
564 /*
565 * Attempts to obtain a handle to the first page of the g_current_store
566 * - g_current_page will be populated if a page is obtained
567 */
kv_update_action_select_first_page(void)568 STATIC kv_update_event_t kv_update_action_select_first_page(void)
569 {
570 errcode_t res;
571
572 res = kv_store_get_page_handle(g_current_store, 0, &g_current_page);
573 if (res == ERRCODE_SUCC) {
574 return EVENT_PAGE_SELECTED;
575 }
576 return EVENT_PAGE_NOT_SELECTED;
577 }
578
579 /*
580 * Attempts to obtain a handle to the next page of the g_current_store, based on g_current_page
581 * - g_current_page will be updated if a page is obtained
582 */
kv_update_action_select_next_page(void)583 STATIC kv_update_event_t kv_update_action_select_next_page(void)
584 {
585 errcode_t res;
586 uint32_t page_index;
587 if (kv_page_get_index(&g_current_page, &page_index) != ERRCODE_SUCC) {
588 return EVENT_PAGE_NOT_SELECTED;
589 }
590 page_index++;
591 if (page_index < kv_store_get_page_count(g_current_store)) {
592 res = kv_store_get_page_handle(g_current_store, page_index, &g_current_page);
593 if (res == ERRCODE_SUCC) {
594 return EVENT_PAGE_SELECTED;
595 }
596 }
597
598 return EVENT_PAGE_NOT_SELECTED;
599 }
600
601 /*
602 * Attempts to obtain the first key in the g_current_page, that conforms to g_search_filter
603 * - g_current_key will be populated if a key is obtained
604 */
kv_update_action_find_first_key(void)605 STATIC kv_update_event_t kv_update_action_find_first_key(void)
606 {
607 errcode_t res;
608
609 res = kv_page_find_first_key(&g_current_page, &g_search_filter, &g_current_key);
610 while (res == ERRCODE_SUCC) {
611 if (g_current_key.key_location != g_search_filter.location) {
612 return EVENT_KEY_FOUND;
613 }
614 res = kv_page_find_next_key(&g_current_page, &g_search_filter, &g_current_key);
615 }
616 return EVENT_KEY_NOT_FOUND;
617 }
618
619 /*
620 * Attempts to obtain the next key in the g_current_page, based on g_current_key and that conforms to g_search_filter
621 * - g_current_key will be updated if a key is obtained
622 */
kv_update_action_find_next_key(void)623 STATIC kv_update_event_t kv_update_action_find_next_key(void)
624 {
625 errcode_t res;
626
627 res = kv_page_find_next_key(&g_current_page, &g_search_filter, &g_current_key);
628 while (res == ERRCODE_SUCC) {
629 if (g_current_key.key_location != g_search_filter.location) {
630 return EVENT_KEY_FOUND;
631 }
632 res = kv_page_find_next_key(&g_current_page, &g_search_filter, &g_current_key);
633 }
634 return EVENT_KEY_NOT_FOUND;
635 }
636
/*
 * Prepares the g_current_state_machine->write_buffer to mark g_current_key as invalid (and hence erased)
 * Will not mark keys with the attribute KV_ATTRIBUTE_PERMANENT as invalid, unless the pending
 * key details request a forced write.
 * Returns EVENT_WRITE_BUFFER_PRIMED on success, or EVENT_ERROR with error_code set.
 */
STATIC kv_update_event_t kv_update_action_prepare_delete_key(void)
{
    kv_managed_write_buffer_t *write_buffer = NULL;
    kv_attributes_t attributes = kv_key_attributes(&g_current_key);
    /* Permanent keys may only be deleted when the force flag ("focre_write", sic) is set */
    if (((uint32_t)attributes & (uint32_t)KV_ATTRIBUTE_PERMANENT) != 0 &&
        (g_new_key_details.focre_write != true)) {
        g_current_state_machine->error_code = ERRCODE_NV_TRYING_TO_MODIFY_A_PERMANENT_KEY;
        return EVENT_ERROR;
    }

    write_buffer = g_current_state_machine->write_buffer;
    if (write_buffer == NULL) {
        g_current_state_machine->error_code = ERRCODE_NV_WRITE_BUFFER_NOT_ALLOCATED;
        return EVENT_ERROR;
    }
    if (write_buffer->size < sizeof(uint32_t)) {
        g_current_state_machine->error_code = ERRCODE_NV_WRITE_BUFFER_TOO_SMALL;
        return EVENT_ERROR;
    }

    /* As at most 4 bytes are being written, directly prime the write buffer rather than
     * setting up a source buffer */
    *(uint32_t *)write_buffer->data = KV_KEY_INVALID;
    write_buffer->data_consumed = 0;

    if (g_nv_header_magic == KV_KEY_MAGIC) {
        /* Legacy header format: write a single byte one byte past the key location.
         * NOTE(review): presumably that byte holds the key's validity marker - confirm
         * against the key header layout in nv_key.h. */
        write_buffer->data_length = (uint16_t)sizeof(uint8_t);
        /* Need to manually configure the write location for each key being deleted */
        /* as they will not follow in an ordered sequence */
        write_buffer->write_location = (uint32_t)(uintptr_t)((uint8_t *)g_current_key.key_location + sizeof(uint8_t));
    } else {
        /* Current header format: overwrite the first two bytes at the key location */
        write_buffer->data_length = (uint16_t)sizeof(uint16_t);
        write_buffer->write_location = (uint32_t)(uintptr_t)g_current_key.key_location;
    }

    return EVENT_WRITE_BUFFER_PRIMED;
}
677
kv_update_action_prepare_copy_key(void)678 STATIC kv_update_event_t kv_update_action_prepare_copy_key(void)
679 {
680 kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;
681 if (write_buffer == NULL) {
682 g_current_state_machine->error_code = ERRCODE_NV_WRITE_BUFFER_NOT_ALLOCATED;
683 return EVENT_ERROR;
684 }
685 if (write_buffer->write_location == 0) {
686 write_buffer->write_location = (uint32_t)(uintptr_t)g_defrag_page_location + (uint32_t)sizeof(kv_page_header_t);
687 }
688
689 /* Use a source buffer to manage copy progress of key data */
690 uint8_t *source = (uint8_t *)g_current_key.key_location;
691 uint16_t length = kv_key_flash_size(&g_current_key);
692 kv_managed_source_buffer_t *source_buffer = kv_update_source_buffer(source, length);
693 g_current_state_machine->current_source = source_buffer; /* Point current source buffer at first buffer */
694 if (source_buffer == NULL) {
695 g_current_state_machine->error_code = ERRCODE_MALLOC;
696 return EVENT_ERROR;
697 }
698
699 return EVENT_COPY_KEY_READY;
700 }
701
702 /*
703 * Attempts to locate an unused page of flash, in the KV NV flash region, to use as the current defrag page
704 * g_defrag_page_location will be set to point to the unused flash page
705 */
kv_update_action_find_defrag_page(void)706 STATIC kv_update_event_t kv_update_action_find_defrag_page(void)
707 {
708 errcode_t res;
709 g_defrag_page_location = NULL;
710 res = kv_nvregion_find_unused_page(&g_defrag_page_location);
711 if (res == ERRCODE_SUCC) {
712 nv_log_debug("[NV] found defrag_page. loc = 0x%x\r\n", g_defrag_page_location);
713 return EVENT_DEFRAG_FOUND;
714 } else {
715 g_current_state_machine->error_code = res;
716 return EVENT_ERROR;
717 }
718 }
719
720 /*
721 * Attempts to erase the current defrag page, as defined by g_defrag_page_location
722 */
kv_update_action_erase_defrag_page(void)723 STATIC kv_update_event_t kv_update_action_erase_defrag_page(void)
724 {
725 errcode_t res;
726 res = kv_nvregion_erase_page(g_defrag_page_location);
727 if (res == ERRCODE_SUCC) {
728 return EVENT_DEFRAG_ERASED;
729 } else if (res == ERRCODE_NV_WRITE_VETOED) {
730 return EVENT_SUSPEND;
731 } else {
732 g_current_state_machine->error_code = res;
733 return EVENT_ERROR;
734 }
735 }
736
737 /*
738 * Attempts to start the g_prep_defrag_page_machine state machine
739 */
kv_update_action_prepare_defrag_page(void)740 STATIC kv_update_event_t kv_update_action_prepare_defrag_page(void)
741 {
742 errcode_t res = begin_state_machine(&g_prep_defrag_page_machine);
743 if (res == ERRCODE_SUCC) {
744 return EVENT_NONE;
745 } else {
746 g_current_state_machine->error_code = res;
747 return EVENT_ERROR;
748 }
749 }
750
751 /*
752 * Attempts to start the g_copy_all_keys_machine state machine
753 * Configures g_search_filter to find all valid (un-erased) keys
754 */
kv_update_action_copy_all_keys(void)755 STATIC kv_update_event_t kv_update_action_copy_all_keys(void)
756 {
757 /* Configure search filter to copy all valid keys */
758 g_search_filter.pattern = 0;
759 g_search_filter.mask = 0;
760 g_search_filter.state = KV_KEY_FILTER_STATE_VALID;
761 g_search_filter.type = KV_KEY_FILTER_TYPE_ANY;
762 g_search_filter.location = 0;
763
764 errcode_t res = begin_state_machine(&g_copy_all_keys_machine);
765 if (res == ERRCODE_SUCC) {
766 return EVENT_NONE;
767 } else {
768 g_current_state_machine->error_code = res;
769 return EVENT_ERROR;
770 }
771 }
772
773 /*
774 * Prepares g_current_state_machine->write_buffer with information to construct a page header for the
775 * current defrag page, as defined by g_defrag_page_location
776 * All information is obtained from g_current_page
777 * The page sequence_number is updated to indicate that this page will supersede g_current_page
778 */
kv_update_action_prepare_page_header(void)779 STATIC kv_update_event_t kv_update_action_prepare_page_header(void)
780 {
781 kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;
782 if (write_buffer == NULL) {
783 g_current_state_machine->error_code = ERRCODE_NV_WRITE_BUFFER_NOT_ALLOCATED;
784 return EVENT_ERROR;
785 }
786 write_buffer->write_location = (uint32_t)(uintptr_t)g_defrag_page_location;
787
788 if (memcpy_s(write_buffer->data, sizeof(kv_page_header_t), &g_current_page, sizeof(kv_page_header_t)) != EOK) {
789 return EVENT_ERROR;
790 }
791 write_buffer->data_length = (uint16_t)sizeof(kv_page_header_t);
792 write_buffer->data_consumed = 0;
793
794 /* Too specific, move to kv_page? */
795 kv_page_header_t *defrag_page_header = (kv_page_header_t *)write_buffer->data;
796 defrag_page_header->sequence_number++;
797 defrag_page_header->inverted_sequence_number = ~defrag_page_header->sequence_number;
798
799 return EVENT_WRITE_BUFFER_PRIMED;
800 }
801
802 /*
803 * Forces a re-scan of the KV NV flash region to update a map of KV pages
804 */
kv_update_action_update_nvregion_map(void)805 STATIC kv_update_event_t kv_update_action_update_nvregion_map(void)
806 {
807 errcode_t res;
808
809 res = kv_nvregion_scan();
810 if (res != ERRCODE_SUCC) {
811 g_current_state_machine->error_code = res;
812 return EVENT_ERROR;
813 }
814 return EVENT_NVREGION_MAP_UPDATED;
815 }
816
817 /*
818 * Attempts to find a suitable write location in g_current_store for g_new_key_details
819 * Will set g_current_store_write_location if enough space is found in g_current_store
820 * Will indicate that a defrag is required if there is not currently enough free space in g_current_store
821 * Will return an error if a defrag would not free up enough space for the new key
822 */
kv_update_action_find_write_position(void)823 STATIC kv_update_event_t kv_update_action_find_write_position(void)
824 {
825 errcode_t res;
826 uint16_t required_space;
827 kv_key_handle_t new_key;
828 kv_page_status_t page_status;
829
830 /* Work out space needed for new key */
831 kv_key_build_from_new(&new_key, &g_new_key_details, 0);
832 required_space = kv_key_flash_size(&new_key);
833
834 res = kv_store_find_write_page(g_current_store, required_space, &g_current_page, &page_status);
835 if (res == ERRCODE_SUCC) {
836 g_current_store_write_location = (kv_key_location)(uintptr_t)page_status.first_writable_location;
837 return EVENT_WRITE_POS_FOUND;
838 } else if (res == ERRCODE_NV_DEFRAGMENTATION_NEEDED) {
839 return EVENT_DEFRAG_REQUIRED;
840 } else {
841 g_current_state_machine->error_code = res;
842 return EVENT_ERROR;
843 }
844 }
845
846 /*
847 * Attempts to start the g_defrag_page_machine state machine
848 */
kv_update_action_defrag_current_page(void)849 STATIC kv_update_event_t kv_update_action_defrag_current_page(void)
850 {
851 errcode_t res = begin_state_machine(&g_defrag_page_machine);
852 if (res == ERRCODE_SUCC) {
853 return EVENT_NONE;
854 } else {
855 g_current_state_machine->error_code = res;
856 return EVENT_ERROR;
857 }
858 }
859
860 /*
861 * Attempts to start the g_prepare_store_machine state machine
862 */
kv_update_action_prepare_store(void)863 STATIC kv_update_event_t kv_update_action_prepare_store(void)
864 {
865 errcode_t res = begin_state_machine(&g_prepare_store_machine);
866 if (res == ERRCODE_SUCC) {
867 return EVENT_NONE;
868 } else {
869 g_current_state_machine->error_code = res;
870 return EVENT_ERROR;
871 }
872 }
873
874 /*
875 * Configure write location for g_current_state_machine->write_buffer
876 * Attempt to allocate memory for AES Control data structure, if key data is to be encrypted
877 */
kv_update_helper_prepare_write_buffer(void)878 STATIC kv_update_event_t kv_update_helper_prepare_write_buffer(void)
879 {
880 kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;
881
882 if (write_buffer == NULL) {
883 g_current_state_machine->error_code = ERRCODE_NV_WRITE_BUFFER_NOT_ALLOCATED;
884 return EVENT_ERROR;
885 }
886 write_buffer->write_location = (uint32_t)(uintptr_t)g_current_store_write_location;
887 return EVENT_KEY_DATA_READY;
888 }
889
/*
 * Attempt to allocate a kv_managed_source_buffer_t covering the key header
 * (g_current_key.header), tagged with the integrity mechanism that matches the new
 * key's attributes: hash or GCM tag for encrypted keys, CRC otherwise.
 */
STATIC kv_update_event_t kv_update_helper_setup_key_header_source_buffer(void)
{
    kv_managed_source_buffer_t *source_buffer = NULL;

    /* Setup source buffers for key header */
    source_buffer = kv_update_source_buffer((uint8_t *)&g_current_key.header, sizeof(kv_key_header_t));
    if (source_buffer == NULL) {
        g_current_state_machine->error_code = ERRCODE_NV_KEY_HEADER_BUFFER_NOT_ALLOCATED;
        return EVENT_ERROR;
    }

#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
    if (((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_ENCRYPTED) != 0) {
#if (CONFIG_NV_SUPPORT_HASH_FOR_CRYPT == NV_YES)
        /* Encrypted keys are integrity-protected with a hash ... */
        source_buffer->hash_data = true;
#else
        /* ... or with an AES-GCM tag when hashing is not configured. */
        source_buffer->gcm_tag_data = true;
#endif
    } else {
        source_buffer->crc_data = true;
    }

#else
    source_buffer->crc_data = true;
#endif

    /* The key header itself is never encrypted. */
    source_buffer->encrypt_data = false;
    return EVENT_KEY_DATA_READY;
}
922
/*
 * Attempt to allocate a kv_managed_source_buffer_t for the new key's data
 * (g_new_key_details.kvalue), tagged with the integrity mechanism that matches the
 * key's attributes.
 * release_key_data: when true, the source buffer takes ownership of kvalue and it is
 * released when the buffer is cleaned up.
 */
STATIC kv_update_event_t kv_update_helper_setup_key_data_source_buffer(bool release_key_data)
{
    kv_managed_source_buffer_t *source_buffer = NULL;

    /* Setup source buffer for key data */
    source_buffer = kv_update_source_buffer((uint8_t *)g_new_key_details.kvalue, g_new_key_details.kvalue_length);
    if (source_buffer == NULL) {
        g_current_state_machine->error_code = ERRCODE_NV_KEY_DATA_BUFFER_NOT_ALLOCATED;
        return EVENT_ERROR;
    }
    source_buffer->release_data = release_key_data;

#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
    if (((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_ENCRYPTED) != 0) {
#if (CONFIG_NV_SUPPORT_HASH_FOR_CRYPT == NV_YES)
        /* Encrypted keys are integrity-protected with a hash ... */
        source_buffer->hash_data = true;
#else
        /* ... or with an AES-GCM tag when hashing is not configured. */
        source_buffer->gcm_tag_data = true;
#endif
    } else {
        source_buffer->crc_data = true;
    }

#else
    source_buffer->crc_data = true;
#endif

    /* The data part is the only part of a key that is encrypted (when requested). */
    source_buffer->encrypt_data = (((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_ENCRYPTED) != 0);
    return EVENT_KEY_DATA_READY;
}
956
957 /*
958 * Attempt to allocate a kv_managed_source_buffer_t for a key hash
959 * Will also attempt to allocate a buffer to contain the calculated hash
960 */
kv_update_helper_setup_key_hash_source_buffer(void)961 STATIC kv_update_event_t kv_update_helper_setup_key_hash_source_buffer(void)
962 {
963 uint8_t *key_hash = NULL;
964 kv_managed_source_buffer_t *source_buffer = NULL;
965 uint32_t hash_crc_len = KV_CRYPTO_CRC_SIZE;
966 #if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
967 if (((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_ENCRYPTED) != 0) {
968 hash_crc_len = KV_CRYPTO_HASH_SIZE;
969 }
970 #endif
971
972 /* Setup source buffer for key hash */
973 /* The hash is calculated while data is written to flash then placed into this buffer for writing */
974 key_hash = (uint8_t *)kv_malloc(hash_crc_len);
975 if (key_hash == NULL) {
976 g_current_state_machine->error_code = ERRCODE_MALLOC;
977 return EVENT_ERROR;
978 }
979 source_buffer = kv_update_source_buffer(key_hash, hash_crc_len);
980 if (source_buffer == NULL) {
981 kv_free(key_hash);
982 g_current_state_machine->error_code = ERRCODE_NV_KEY_HASH_BUFFER_NOT_ALLOCATED;
983 return EVENT_ERROR;
984 }
985 source_buffer->release_data = true;
986 source_buffer->receive_hash = true;
987 source_buffer->encrypt_data = false;
988 return EVENT_KEY_DATA_READY;
989 }
990
/*
 * Prepare kv_managed_source_buffers for the three separate parts of a key, i.e. header,
 * data and hash/CRC, and ensure the state machine write buffer is ready to receive data
 * for writing.
 * release_key_data: when true, ownership of g_new_key_details.kvalue is handed to the
 * data source buffer; if setup fails before that hand-over, the data is freed here.
 */
STATIC kv_update_event_t kv_update_helper_prepare_source_buffers(bool release_key_data)
{
    kv_update_event_t event;

    event = kv_update_helper_prepare_write_buffer();
    if (event == EVENT_KEY_DATA_READY) {
        event = kv_update_helper_setup_key_header_source_buffer();
    }
    if (event == EVENT_KEY_DATA_READY) {
        event = kv_update_helper_setup_key_data_source_buffer(release_key_data);
    }
    if ((event != EVENT_KEY_DATA_READY) && release_key_data) {
        /* Ensure we release any new key data as it will not have been placed into a source buffer */
        /* which get cleaned up automatically when a state machine ends */
        kv_free((void *)g_new_key_details.kvalue);
        g_new_key_details.kvalue = NULL;
    }
    if (event == EVENT_KEY_DATA_READY) {
        event = kv_update_helper_setup_key_hash_source_buffer();
    }
    return event;
}
1018
1019 /*
1020 * Checks if g_current_key would end up being modified by any changes detailed in g_new_key_details
1021 * Will return an error if g_current_key is permanent
1022 */
kv_update_helper_check_for_key_updates(void)1023 STATIC kv_update_event_t kv_update_helper_check_for_key_updates(void)
1024 {
1025 errcode_t res;
1026 kv_attributes_t attributes = kv_key_attributes(&g_current_key);
1027 if ((((uint32_t)attributes & (uint32_t)KV_ATTRIBUTE_PERMANENT) != 0) &&
1028 (g_new_key_details.focre_write != true)) {
1029 /* We can't modify permanent keys in any way */
1030 g_current_state_machine->error_code = ERRCODE_NV_TRYING_TO_MODIFY_A_PERMANENT_KEY;
1031 return EVENT_ERROR;
1032 }
1033 if (((uint32_t)attributes & (uint32_t)KV_ATTRIBUTE_NON_UPGRADE) !=
1034 ((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_NON_UPGRADE)) {
1035 /* Attributes are being updated */
1036 return EVENT_KEY_UPDATE_REQUIRED;
1037 }
1038 if (((uint32_t)attributes & (uint32_t)g_new_key_details.attributes) != (uint32_t)g_new_key_details.attributes) {
1039 /* Attributes are being updated */
1040 return EVENT_KEY_UPDATE_REQUIRED;
1041 }
1042
1043 if (g_current_key.header.length != g_new_key_details.kvalue_length) {
1044 /* New data is of a different length */
1045 return EVENT_KEY_UPDATE_REQUIRED;
1046 }
1047 res = kv_helper_compare_key_data(&g_current_key, g_new_key_details.kvalue,
1048 (uint16_t)g_new_key_details.kvalue_length);
1049 if (res != ERRCODE_SUCC) {
1050 /* New data content is different */
1051 return EVENT_KEY_UPDATE_REQUIRED;
1052 }
1053
1054 /* Key not actually being updated */
1055 return EVENT_KEY_UPDATE_NOT_NEEDED;
1056 }
1057
/*
 * Constructs a new key header from g_new_key_details, copying the attributes of any existing key with the same key_id
 * Updates g_current_key with details of the new key header
 */
STATIC kv_update_event_t kv_update_action_prepare_write_key(void)
{
    errcode_t res = kv_store_find_valid_key(g_current_store, g_new_key_details.key_id, &g_current_key);
    if (res == ERRCODE_SUCC) {
        /* A key with this id already exists: skip the flash write if nothing changes. */
        kv_update_event_t event = kv_update_helper_check_for_key_updates();
        if (event != EVENT_KEY_UPDATE_REQUIRED) {
            nv_log_debug("[NV] The Key not need update. Key id = 0x%x\r\n", g_current_key.header.key_id);
            return event;
        }

        /* Combine existing key attributes with those requested for the new key */
        if (g_current_key.header.type != KV_KEY_TYPE_NORMAL) {
            /* Non-normal key types keep the permanent attribute. */
            g_new_key_details.attributes = (kv_attributes_t)((uint32_t)g_new_key_details.attributes |
                (uint32_t)KV_ATTRIBUTE_PERMANENT);
        }
#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
        if (g_current_key.header.enc_key != 0) {
            /* A key that was stored encrypted stays encrypted when rewritten. */
            g_new_key_details.attributes = (kv_attributes_t)((uint32_t)g_new_key_details.attributes |
                (uint32_t)KV_ATTRIBUTE_ENCRYPTED);
        }
#endif
    }

    kv_key_build_from_new(&g_current_key, &g_new_key_details, g_current_store_write_location);
    /* false: the key data buffer is not handed over to the source buffer for release. */
    return kv_update_helper_prepare_source_buffers(false);
}
1088
kv_update_helper_get_current_key(uint8_t ** old_kvalue,uint32_t * kvalue_length)1089 STATIC errcode_t kv_update_helper_get_current_key(uint8_t **old_kvalue, uint32_t *kvalue_length)
1090 {
1091 kv_key_handle_t current_key;
1092 errcode_t res;
1093
1094 res = kv_store_find_valid_key(g_current_store, g_new_key_details.key_id, ¤t_key);
1095 if (res != ERRCODE_SUCC) {
1096 return res;
1097 }
1098
1099 kv_attributes_t attributes = kv_key_attributes(¤t_key);
1100 if (((uint32_t)attributes & KV_ATTRIBUTE_PERMANENT) != 0) {
1101 return ERRCODE_SUCC;
1102 }
1103
1104 #if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
1105 if (((uint32_t)attributes & KV_ATTRIBUTE_ENCRYPTED) != 0) {
1106 /* if old key is encrypted, the new key must be encrypted too */
1107 g_new_key_details.attributes = (kv_attributes_t)((uint32_t)g_new_key_details.attributes |
1108 (uint32_t)KV_ATTRIBUTE_ENCRYPTED);
1109 }
1110 #endif
1111
1112 *kvalue_length = current_key.header.length;
1113 *old_kvalue = (uint8_t *)kv_malloc(current_key.header.length);
1114 if (*old_kvalue == NULL) {
1115 return ERRCODE_MALLOC;
1116 }
1117
1118 res = kv_key_read_data(¤t_key, *old_kvalue);
1119 if (res != ERRCODE_SUCC) {
1120 kv_free((void *)*old_kvalue);
1121 }
1122 return res;
1123 }
1124
/*
 * Attempt to claim access to hardware AES and hash functions.
 * Also resets the write location and all source buffer consumption counters so the
 * write pass starts from the beginning.
 * NOTE(review): unlike sibling actions, write_buffer is dereferenced here without a
 * NULL check - confirm the state machine guarantees it was allocated.
 */
STATIC kv_update_event_t kv_update_action_claim_crypto(void)
{
    kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;

    /* Reset write location */
    write_buffer->write_location = (uint32_t)(uintptr_t)g_current_key.key_location;

    /* Point current source buffer at first buffer and reset data_consumed values */
    g_current_state_machine->current_source = g_current_state_machine->source_buffer;
    kv_managed_source_buffer_t *source_buffer = g_current_state_machine->source_buffer;
    while (source_buffer != NULL) {
        source_buffer->data_consumed = 0;
        source_buffer = source_buffer->next;
    }

    write_buffer->crypto_handle = INVAILD_CRYPTO_HANDLE;

#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
    if (((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_ENCRYPTED) != 0) {
        /* Claim cryptographic engines */
        if (!write_buffer->encrypt_claimed) {
            errcode_t res = nv_crypto_claim_aes(&write_buffer->crypto_handle, &(g_current_key.header));
            if (res != ERRCODE_SUCC) {
                g_current_state_machine->error_code = res;
                return EVENT_ERROR;
            }
            write_buffer->encrypt_claimed = true;
        }

#if (CONFIG_NV_SUPPORT_HASH_FOR_CRYPT == NV_YES)
        if (!write_buffer->hash_claimed) {
            errcode_t res = nv_crypto_start_hash();
            if (res != ERRCODE_SUCC) {
                /* Hash engine unavailable: roll back the AES claim taken above. */
                nv_crypto_release_aes(write_buffer->crypto_handle);
                write_buffer->crypto_handle = INVAILD_CRYPTO_HANDLE;
                write_buffer->encrypt_claimed = false;
                g_current_state_machine->error_code = res;
                return EVENT_ERROR;
            }
            write_buffer->crc_ret = 0;
            write_buffer->hash_claimed = true;
        }
#else
        if (!write_buffer->gcm_tag_claimed) {
            write_buffer->crc_ret = 0;
            write_buffer->gcm_tag_claimed = true;
        }
#endif /* #if (CONFIG_NV_SUPPORT_HASH_FOR_CRYPT == NV_YES) */
    } else {
#endif /* #if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES) */
        if (!write_buffer->crc_claimed) {
            write_buffer->crc_ret = 0;
            write_buffer->crc_claimed = true;
        }
#if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
    }
#endif

    return EVENT_CRYPTO_CLAIMED;
}
1188
1189 /*
1190 * Attempts to erases all instances of g_current_key (in g_current_store), by marking them as invalid
1191 * Configures g_search_filter to find all valid keys with an id matching g_current_key.header.key_id
1192 * Starts the g_erase_keys_machine state machine to perform the erasing of keys
1193 */
kv_update_action_erase_old_keys(void)1194 STATIC kv_update_event_t kv_update_action_erase_old_keys(void)
1195 {
1196 g_search_filter.pattern = g_current_key.header.key_id;
1197 g_search_filter.mask = 0xFFFF;
1198 g_search_filter.state = KV_KEY_FILTER_STATE_VALID;
1199 g_search_filter.type = KV_KEY_FILTER_TYPE_ANY;
1200 g_search_filter.location = g_current_key.key_location;
1201
1202 errcode_t res = begin_state_machine(&g_erase_keys_machine);
1203 if (res == ERRCODE_SUCC) {
1204 return EVENT_NONE;
1205 } else {
1206 g_current_state_machine->error_code = res;
1207 return EVENT_ERROR;
1208 }
1209 }
1210
kv_update_action_update_nvmap_for_erase_key(void)1211 STATIC kv_update_event_t kv_update_action_update_nvmap_for_erase_key(void)
1212 {
1213 kv_nvregion_map_t *nv_map = kv_nvregion_get_map();
1214 uint32_t page_number = kv_nvregion_get_page_number(g_current_page.page_location);
1215 if (page_number >= nv_map->num_entries) {
1216 nv_log_debug("[NV] get erase page_number failed! page_location = 0x%x\r\n", g_current_page.page_location);
1217 g_current_state_machine->error_code = ERRCODE_NV_INVALID_PAGE;
1218 return EVENT_ERROR;
1219 }
1220
1221 uint16_t key_size = (uint32_t)sizeof(kv_key_header_t);
1222 uint16_t hash_crc_size = KV_CRYPTO_CRC_SIZE;
1223 #if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
1224 if (g_current_key.header.enc_key == AES_KDFKEY_SDRK_TYPE) {
1225 hash_crc_size = KV_CRYPTO_HASH_SIZE;
1226 }
1227 #endif
1228 key_size += hash_crc_size;
1229
1230 kv_attributes_t attributes = kv_key_attributes(&g_current_key);
1231 key_size += kv_key_padded_data_length(attributes, g_current_key.header.length);
1232
1233 nv_page_status_map_t *page_status = &(nv_map->page_status_map[page_number]);
1234 page_status->reclaimable_space += key_size;
1235 nv_log_debug("[NV] update nv map for erase key = 0x%x (page = 0x%x) size = %d\r\n",
1236 g_current_key.header.key_id, g_current_page.page_location, key_size);
1237 return EVENT_PAGE_MAP_UPDATED;
1238 }
1239
kv_update_action_update_nvmap_for_new_key(void)1240 STATIC kv_update_event_t kv_update_action_update_nvmap_for_new_key(void)
1241 {
1242 kv_nvregion_map_t *nv_map = kv_nvregion_get_map();
1243
1244 uint32_t page_number = kv_nvregion_get_page_number(g_current_page.page_location);
1245 if (page_number >= nv_map->num_entries) {
1246 nv_log_debug("[NV] get new key page_number failed! page_location = 0x%x\r\n", g_current_page.page_location);
1247 g_current_state_machine->error_code = ERRCODE_NV_INVALID_PAGE;
1248 return EVENT_ERROR;
1249 }
1250
1251 uint16_t key_size = (uint32_t)sizeof(kv_key_header_t);
1252 uint16_t hash_crc_size = KV_CRYPTO_CRC_SIZE;
1253 #if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
1254 if (((uint32_t)g_new_key_details.attributes & (uint32_t)KV_ATTRIBUTE_ENCRYPTED) != 0) {
1255 hash_crc_size = KV_CRYPTO_HASH_SIZE;
1256 }
1257 #endif
1258
1259 key_size += hash_crc_size;
1260 key_size += kv_key_padded_data_length(g_new_key_details.attributes, (uint16_t)g_new_key_details.kvalue_length);
1261
1262 nv_page_status_map_t *page_status = &(nv_map->page_status_map[page_number]);
1263 page_status->used_space += key_size;
1264 page_status->first_writable_offset += key_size;
1265 nv_log_debug("[NV] update nv map for new key = 0x%x (page = 0x%x) size = %d\r\n",
1266 g_new_key_details.key_id, g_current_page.page_location, key_size);
1267 return EVENT_PAGE_MAP_UPDATED;
1268 }
1269
1270 /*
1271 * Begins a new state machine by creating a new entry at the end of the active state machines list
1272 * and updating g_current_state_machine
1273 *
1274 * Either called to begin processing a *new* top-level state machine, before calling process_state_machine()
1275 * or called by a state machine action function to kick off the processing of a new (nested) state machine
1276 */
begin_state_machine(const kv_state_machine_t * machine)1277 STATIC errcode_t begin_state_machine(const kv_state_machine_t *machine)
1278 {
1279 kv_active_state_machine_t *new_state_machine =
1280 (kv_active_state_machine_t *)kv_zalloc(sizeof(kv_active_state_machine_t));
1281 if (new_state_machine == NULL) {
1282 return ERRCODE_MALLOC;
1283 }
1284 new_state_machine->prev = g_current_state_machine;
1285
1286 /* Configure default initial state and event for new state machine */
1287 new_state_machine->machine = machine;
1288 new_state_machine->state = machine->initial_state;
1289 new_state_machine->event = EVENT_NONE;
1290 new_state_machine->error_code = ERRCODE_SUCC;
1291 g_current_state_machine = new_state_machine;
1292
1293 if (g_current_state_machine->machine->write_buffer_size > 0) {
1294 return kv_create_write_buffer(0, g_current_state_machine->machine->write_buffer_size);
1295 } else {
1296 return ERRCODE_SUCC;
1297 }
1298 }
1299
1300 /*
1301 * Should only be called by process_state_machine()
1302 *
1303 * Ends current state machine and updates g_current_state_machine to point to the previous state machine.
1304 * Passes either the pre-defined exit_event or the last event returned by an action function as the
1305 * event received by the previous state machine.
1306 */
end_state_machine(void)1307 STATIC void end_state_machine(void)
1308 {
1309 if (g_current_state_machine != NULL) {
1310 kv_remove_write_buffer();
1311 kv_remove_source_buffer();
1312
1313 kv_active_state_machine_t *prev_state_machine = g_current_state_machine->prev;
1314 if (prev_state_machine != NULL) {
1315 if (g_current_state_machine->machine->exit_event != EVENT_NONE) {
1316 /* Single explicit exit event defined for exiting state machine */
1317 prev_state_machine->event = g_current_state_machine->machine->exit_event;
1318 } else {
1319 /* Pass last event raised from exiting state machine back to invoking state machine */
1320 prev_state_machine->event = g_current_state_machine->event;
1321 }
1322 }
1323
1324 kv_free(g_current_state_machine);
1325 g_current_state_machine = prev_state_machine;
1326 }
1327 }
1328
/* Unwinds and releases every state machine still on the active list. */
static void clean_state_machine(void)
{
    while (g_current_state_machine != NULL) {
        end_state_machine();
    }
}
1335
1336 /*
1337 * Should only be called by process_state_machine()
1338 *
1339 * Attempts to locate and call an action function based on the state of the current state machine
1340 * An action function may spawn another state machine and hence cause g_current_state_machine to change.
1341 *
1342 * g_current_state_machine->event is used to record the return value from the action function.
1343 *
1344 * If a new state machine is spawned, this return value will effectively be lost as the procedure for processing
1345 * new state machines is to assume there is no initial event, just an initial state and thus action to perform.
1346 *
1347 * If an action function is not found then g_current_state_machine->event will not be updated. This really should
1348 * be reserved for when state transitions cause g_current_state_machine->state to be one of the following
1349 * special cases, which are handled by process_state_machine():
1350 * - STATE_INVALID
1351 * - STATE_SUSPENDED
1352 * - STATE_EXIT
1353 */
invoke_current_state_action(void)1354 static void invoke_current_state_action(void)
1355 {
1356 if (g_current_state_machine != NULL) {
1357 /* Search action table to locate an action function for the current state */
1358 /* STATE_EXIT always marks end of an action table */
1359 const kv_update_action_t *update_action = g_current_state_machine->machine->action_table;
1360 while (update_action->state != STATE_EXIT) {
1361 if (update_action->state == g_current_state_machine->state) {
1362 /* Found a matching entry */
1363 break;
1364 }
1365 update_action++;
1366 }
1367
1368 /* Call action function if we have found one */
1369 if (update_action->action != NULL) {
1370 /* Action function could cause a change of g_current_state_machine */
1371 g_current_state_machine->event = (*update_action->action)();
1372 }
1373 }
1374 }
1375
1376 /*
1377 * Should only be called by process_state_machine()
1378 *
1379 * Uses the last event recorded for the state machine, in g_current_state_machine->event, to select the
1380 * next appropriate state for the current state machine.
1381 *
1382 * In the case of no suitable entry on the transition_table an error state of STATE_INVALID is entered.
1383 * This will cause process_state_machine() to terminate processing of all queued state machines and
1384 * return an error.
1385 */
update_current_state(void)1386 STATIC void update_current_state(void)
1387 {
1388 /* Don't update anything for EVENT_NONE. We'll eventually call the action function again */
1389 if ((g_current_state_machine != NULL) && (g_current_state_machine->event != EVENT_NONE)) {
1390 /* Search transition table looking for a transition entry for current state and event */
1391 /* A transition table is always terminated with transition->state == STATE_EXIT */
1392 const kv_update_transition_t *transition = g_current_state_machine->machine->transition_table;
1393 while (transition->state != STATE_EXIT) {
1394 if ((g_current_state_machine->state == transition->state) &&
1395 (g_current_state_machine->event == transition->event)) {
1396 /* Found a matching entry */
1397 break;
1398 }
1399 transition++;
1400 }
1401
1402 /* Update current state if we have found a suitable transition table entry */
1403 if (transition->state != STATE_EXIT) {
1404 g_current_state_machine->state = transition->next_state;
1405 } else {
1406 /* No state transition found for the current state and event */
1407 g_current_state_machine->state = STATE_INVALID;
1408 }
1409 }
1410 }
1411
#if defined (DEBUG_PRINT_ENABLED)
/* Human-readable names for kv_update states, indexed by state value (debug tracing). */
static const char *g_update_state_strings[STATE_EXIT + 1] = {
    [STATE_INVALID] = "STATE_INVALID",
    [STATE_SELECT_FIRST_PAGE] = "STATE_SELECT_FIRST_PAGE",
    [STATE_SELECT_NEXT_PAGE] = "STATE_SELECT_NEXT_PAGE",
    [STATE_FIND_FIRST_KEY] = "STATE_FIND_FIRST_KEY",
    [STATE_FIND_NEXT_KEY] = "STATE_FIND_NEXT_KEY",
    [STATE_FIND_EXISTING_KEY] = "STATE_FIND_EXISTING_KEY",
    [STATE_PREP_COPY_KEY] = "STATE_PREP_COPY_KEY",
    [STATE_PREP_DELETE_KEY] = "STATE_PREP_DELETE_KEY",
    [STATE_PREP_MODIFY_KEY] = "STATE_PREP_MODIFY_KEY",
    [STATE_FIND_DEFRAG] = "STATE_FIND_DEFRAG",
    [STATE_ERASE_DEFRAG] = "STATE_ERASE_DEFRAG",
    [STATE_PREP_DEFRAG] = "STATE_PREP_DEFRAG",
    [STATE_COPY_ALL_KEYS] = "STATE_COPY_ALL_KEYS",
    [STATE_PREP_PAGE_HEADER] = "STATE_PREP_PAGE_HEADER",
    [STATE_WRITE_PAGE_HEADER] = "STATE_WRITE_PAGE_HEADER",
    [STATE_UPDATE_NVREGION_MAP] = "STATE_UPDATE_NVREGION_MAP",
    [STATE_FIND_WRITE_POS] = "STATE_FIND_WRITE_POS",
    [STATE_DEFRAG_PAGE] = "STATE_DEFRAG_PAGE",
    [STATE_PREP_STORE] = "STATE_PREP_STORE",
    [STATE_PREP_KEY_DATA] = "STATE_PREP_KEY_DATA",
    [STATE_CLAIM_CRYPTO] = "STATE_CLAIM_CRYPTO",
    [STATE_ERASE_OLD_KEYS] = "STATE_ERASE_OLD_KEYS",
    [STATE_PRIME_WRITE] = "STATE_PRIME_WRITE",
    [STATE_PERFORM_WRITE] = "STATE_PERFORM_WRITE",
    [STATE_SUSPENDED] = "STATE_SUSPENDED",
    [STATE_EXIT] = "STATE_EXIT"
};

#if defined (DEBUG_KV_UPDATE_STATE_MACHINE)
/* Human-readable names for kv_update events, indexed by event value (debug tracing). */
static const char *g_update_event_strings[EVENT_ERROR + 1] = {
    [EVENT_NONE] = "EVENT_NONE",
    [EVENT_SUSPEND] = "EVENT_SUSPEND",
    [EVENT_WRITE_DATA_EXHAUSTED] = "EVENT_WRITE_DATA_EXHAUSTED",
    [EVENT_WRITE_BUFFER_PRIMED] = "EVENT_WRITE_BUFFER_PRIMED",
    [EVENT_WRITE_BUFFER_STORED] = "EVENT_WRITE_BUFFER_STORED",
    [EVENT_PAGE_SELECTED] = "EVENT_PAGE_SELECTED",
    [EVENT_PAGE_NOT_SELECTED] = "EVENT_PAGE_NOT_SELECTED",
    [EVENT_KEY_FOUND] = "EVENT_KEY_FOUND",
    [EVENT_KEY_NOT_FOUND] = "EVENT_KEY_NOT_FOUND",
    [EVENT_COPY_KEY_READY] = "EVENT_COPY_KEY_READY",
    [EVENT_KEYS_ERASED] = "EVENT_KEYS_ERASED",
    [EVENT_KEY_UPDATE_REQUIRED] = "EVENT_KEY_UPDATE_REQUIRED",
    [EVENT_KEY_UPDATE_NOT_NEEDED] = "EVENT_KEY_UPDATE_NOT_NEEDED",
    [EVENT_DEFRAG_FOUND] = "EVENT_DEFRAG_FOUND",
    [EVENT_DEFRAG_ERASED] = "EVENT_DEFRAG_ERASED",
    [EVENT_DEFRAG_PREPARED] = "EVENT_DEFRAG_PREPARED",
    [EVENT_ALL_KEYS_COPIED] = "EVENT_ALL_KEYS_COPIED",
    [EVENT_PAGE_HEADER_READY] = "EVENT_PAGE_HEADER_READY",
    [EVENT_NVREGION_MAP_UPDATED] = "EVENT_NVREGION_MAP_UPDATED",
    [EVENT_DEFRAG_REQUIRED] = "EVENT_DEFRAG_REQUIRED",
    [EVENT_DEFRAG_COMPLETE] = "EVENT_DEFRAG_COMPLETE",
    [EVENT_WRITE_POS_FOUND] = "EVENT_WRITE_POS_FOUND",
    [EVENT_STORE_READY] = "EVENT_STORE_READY",
    [EVENT_KEY_DATA_READY] = "EVENT_KEY_DATA_READY",
    [EVENT_CRYPTO_CLAIMED] = "EVENT_CRYPTO_CLAIMED",
    [EVENT_WRITE_COMPLETE] = "EVENT_WRITE_COMPLETE",
    [EVENT_ERROR] = "EVENT_ERROR"
};
#endif
#endif
1474
/* Emits a one-line debug trace for the current state machine: its address, state
 * name, the caller-supplied marker string, and the pending event name.
 * Compiles to a no-op unless DEBUG_KV_UPDATE_STATE_MACHINE is defined. */
static void dprint_state_machine(const char *str)
{
    unused(str);
#if defined (DEBUG_KV_UPDATE_STATE_MACHINE)
    if (g_current_state_machine == NULL) {
        nv_log_debug("*** State Machine %s ***\r\n", str);
        return;
    }
    nv_log_debug("%08X: ", (uint32_t)g_current_state_machine);
    nv_log_debug("%s ", g_update_state_strings[g_current_state_machine->state]);
    nv_log_debug("%s ", str);
    nv_log_debug("%s\r\n", g_update_event_strings[g_current_state_machine->event]);
#endif
}
1489
1490 /*
1491 * Function used to process queued state machines
1492 *
1493 * There is only ever one state machine being processed at any one time.
1494 *
1495 * State machines are started (queued) by explicitly calling begin_state_machine() before this function is called.
1496 * State machines themselves can spawn a nested state machine by also calling begin_state_machine() in an action
1497 * function.
1498 * When started, a state machine will be in the initial_state defined for the state machine.
1499 *
1500 * State machines are ended normally by transitioning to STATE_EXIT or abnormally by transitioning to STATE_INVALID.
1501 * In such instances, process_state_machine(), will call end_state_machine() to tidy up.
1502 *
1503 * State machines can be suspended at any point by transitioning to STATE_SUSPENDED.
1504 * When resumed, by calling process_state_machine() again, they will start from the resume_state defined for the
1505 * state machine.
1506 *
1507 * g_current_state_machine points to the current active state machine
1508 *
1509 * When a state machine is processed for the first time, or resumed following suspension, it is assumed there is no
1510 * current event that exists for the state machine in this context. An action function for the current state, which
1511 * will be defined by initial_state or resume_state, will provide an event to provide for further transitions through
1512 * the state machine.
1513 */
STATIC errcode_t process_state_machine(void)
{
    errcode_t error_code = ERRCODE_SUCC;
    /* NOTE(review): flash_access_time is initialized but never written or read here;
     * presumably referenced by other build configurations — confirm before removing. */
    uint64_t flash_access_time = 0;
    unused(flash_access_time);

    while (g_current_state_machine != NULL) {
        /* Call action function for the current state */
        dprint_state_machine("---");
        invoke_current_state_action();
        dprint_state_machine("-->");

        /* Check event returned by action function */
        if (g_current_state_machine->event == EVENT_ERROR) {
            /* Ripple back down chain of state machines and report error */
            error_code = g_current_state_machine->error_code;
            clean_state_machine();
            return error_code;
        }

        /* Transition state machine */
        update_current_state();
        dprint_state_machine("<--");

        /* Has the state machine finished? Ending a nested machine may leave its parent
         * already in STATE_EXIT, hence a loop rather than a single check. */
        while ((g_current_state_machine != NULL) && (g_current_state_machine->state == STATE_EXIT)) {
            invoke_current_state_action(); /* Exit state could have an action function if it needs to do more */
            end_state_machine();           /* than just end the current state machine */
            update_current_state();
            dprint_state_machine("<==");
        }

        /* Has state machine processing been suspended? */
        if ((g_current_state_machine != NULL) && (g_current_state_machine->state == STATE_SUSPENDED)) {
#ifdef CONFIG_NV_SUPPORT_SINGLE_CORE_SYSTEM
            /* if in one-core system, the flash must be writen in one time */
            /* so once the suspend happened, clear the state_machine */
            clean_state_machine();
#else
            /* We need to suspend processing due to other cores waking up */
            /* Configure state machine to resume from the correct place */
            g_current_state_machine->state = g_current_state_machine->machine->resume_state;
            g_current_state_machine->event = EVENT_NONE;
#endif
            /* Release any claimed crypto hardware before yielding */
            kv_release_crypto();
            return ERRCODE_NV_WRITE_VETOED;
        }

        /* Has the state machine entered an invalid state? */
        if ((g_current_state_machine != NULL) && (g_current_state_machine->state == STATE_INVALID)) {
#ifdef CONFIG_NV_SUPPORT_SINGLE_CORE_SYSTEM
            clean_state_machine();
#endif
            kv_release_crypto();
            return ERRCODE_NV_STATE_INVALID;
        }
    }

    return error_code;
}
1574
1575 /* Creates a write buffer used to prepare data, gathered from one or more source buffers, for writing to flash */
kv_create_write_buffer(uint32_t write_location,uint32_t size)1576 STATIC errcode_t kv_create_write_buffer(uint32_t write_location, uint32_t size)
1577 {
1578 if (size == 0) {
1579 return ERRCODE_INVALID_PARAM;
1580 }
1581
1582 if (g_current_state_machine->write_buffer != NULL) {
1583 return ERRCODE_FAIL;
1584 }
1585
1586 uint8_t *data = (uint8_t *)kv_malloc(size);
1587 if (data == NULL) {
1588 return ERRCODE_MALLOC;
1589 }
1590 kv_managed_write_buffer_t *write_buffer =
1591 (kv_managed_write_buffer_t *)kv_zalloc(sizeof(kv_managed_write_buffer_t));
1592 if (write_buffer == NULL) {
1593 kv_free(data);
1594 return ERRCODE_MALLOC;
1595 }
1596
1597 write_buffer->data = data;
1598 write_buffer->size = (uint16_t)size;
1599 write_buffer->data_consumed = (uint16_t)size;
1600 write_buffer->write_location = write_location;
1601 write_buffer->resume_location = write_location;
1602 g_current_state_machine->write_buffer = write_buffer;
1603
1604 return ERRCODE_SUCC;
1605 }
1606
1607 /* Removes the write buffer owned by the current state machine */
kv_remove_write_buffer(void)1608 STATIC void kv_remove_write_buffer(void)
1609 {
1610 if (g_current_state_machine->write_buffer != NULL) {
1611 kv_free(g_current_state_machine->write_buffer->data);
1612 kv_free(g_current_state_machine->write_buffer);
1613 g_current_state_machine->write_buffer = NULL;
1614 }
1615 }
1616
1617 /* Every key consists of a header, data and a hash. Multiple kv_managed_source_buffer_t are constructed to
1618 * manage these separate blocks of data.
1619 * They keep track of where the data is located and how much of it has
1620 * been written to flash.
1621 * These buffers are used in a scatter-gather type approach to feed a single write buffer that holds data
1622 * that has been fully prepared for writing to flash */
kv_update_source_buffer(uint8_t * data,uint32_t data_length)1623 STATIC kv_managed_source_buffer_t* kv_update_source_buffer(uint8_t *data, uint32_t data_length)
1624 {
1625 kv_managed_source_buffer_t *source_buffer = kv_zalloc(sizeof(kv_managed_source_buffer_t));
1626 if (source_buffer == NULL) {
1627 return NULL;
1628 }
1629
1630 source_buffer->data = data;
1631 source_buffer->data_length = (uint16_t)data_length;
1632 source_buffer->next = NULL;
1633
1634 /* Append new buffer to end of list */
1635 kv_managed_source_buffer_t **next_buffer = &g_current_state_machine->source_buffer;
1636 while (*next_buffer != NULL) {
1637 next_buffer = &(*next_buffer)->next;
1638 }
1639 *next_buffer = source_buffer;
1640 return source_buffer;
1641 }
1642
1643 /* Removes kv_managed_source_buffer_t data structures, freeing memory holding actual data too, if necessary */
kv_remove_source_buffer(void)1644 STATIC void kv_remove_source_buffer(void)
1645 {
1646 kv_managed_source_buffer_t *current = g_current_state_machine->source_buffer;
1647 while (current != NULL) {
1648 kv_managed_source_buffer_t *next = current->next;
1649 if (current->release_data) {
1650 kv_free(current->data);
1651 }
1652 kv_free(current);
1653 current = next;
1654 }
1655 g_current_state_machine->source_buffer = NULL;
1656 }
1657
1658 /* Releases claim over hardware AES and hash functions */
kv_release_crypto(void)1659 STATIC void kv_release_crypto(void)
1660 {
1661 if (g_current_state_machine == NULL) {
1662 return;
1663 }
1664 kv_managed_write_buffer_t *write_buffer = g_current_state_machine->write_buffer;
1665 uint32_t status;
1666
1667 if (write_buffer != NULL) {
1668 status = osal_irq_lock();
1669 if (write_buffer->crc_claimed) {
1670 write_buffer->crc_ret = 0;
1671 write_buffer->crc_claimed = false;
1672 }
1673 #if (CONFIG_NV_SUPPORT_ENCRYPT == NV_YES)
1674 if (write_buffer->hash_claimed) {
1675 write_buffer->hash_claimed = false;
1676 }
1677 if (write_buffer->gcm_tag_claimed) {
1678 write_buffer->gcm_tag_claimed = false;
1679 }
1680 if (write_buffer->encrypt_claimed) {
1681 nv_crypto_release_aes(write_buffer->crypto_handle);
1682 write_buffer->crypto_handle = INVAILD_CRYPTO_HANDLE;
1683 write_buffer->encrypt_claimed = false;
1684 }
1685 #endif
1686 osal_irq_restore(status);
1687 }
1688 }
1689
active_state_machine(void)1690 STATIC bool active_state_machine(void)
1691 {
1692 if (g_current_state_machine != NULL) {
1693 return true;
1694 }
1695 return false;
1696 }
1697
determine_flash_task_state_code(errcode_t res)1698 STATIC uint32_t determine_flash_task_state_code(errcode_t res)
1699 {
1700 switch (res) {
1701 case ERRCODE_NV_WRITE_VETOED:
1702 case ERRCODE_FLASH_TASK_PE_VETO:
1703 return FLASH_TASK_BEING_PROCESSED;
1704 case ERRCODE_SUCC:
1705 return FLASH_TASK_COMPLETED;
1706 default:
1707 return (uint32_t)res;
1708 }
1709 }
1710
1711 #if defined(CONFIG_PARTITION_FEATURE_SUPPORT)
nv_read_partition_addr(partition_ids_t partition_nv_id,uint32_t * start_address,uint32_t * size)1712 STATIC errcode_t nv_read_partition_addr(partition_ids_t partition_nv_id, uint32_t *start_address, uint32_t *size)
1713 {
1714 /* 获取分区表标记 */
1715 errcode_t ret;
1716 partition_information_t info;
1717
1718 ret = uapi_partition_get_info(partition_nv_id, &info);
1719 if (ret != ERRCODE_SUCC) {
1720 return ret;
1721 }
1722 *start_address = info.part_info.addr_info.addr + FLASH_PHYSICAL_ADDR_START;
1723 *size = info.part_info.addr_info.size;
1724
1725 return ERRCODE_SUCC;
1726 }
1727 #endif
1728
/*
 * Initializes the NV update module.
 * Determines the KV region layout (from the partition table when available,
 * otherwise from compile-time defaults), scans the region for KV pages, and
 * creates any expected page that is missing.
 * core: currently unused.
 * Returns ERRCODE_SUCC on success, or the first failing step's error code.
 */
errcode_t kv_update_init(cores_t core)
{
    unused(core);

    errcode_t res;
    uint32_t kv_start_addr = 0;
    uint32_t kv_size = 0;
    uint32_t kv_backup_start_addr = 0;
    uint32_t kv_backup_size = 0;
#if defined(CONFIG_PARTITION_FEATURE_SUPPORT)
    res = nv_read_partition_addr(PARTITION_NV_DATA, &kv_start_addr, &kv_size);
    if (res != ERRCODE_SUCC) {
        return res;
    }
    /* Default backup area: placed directly after the KV store data */
    kv_backup_start_addr = kv_start_addr + KV_STORE_DATA_SIZE;
    kv_backup_size = KV_BACKUP_DATA_SIZE;
#if defined(CONFIG_NV_SUPPORT_BACKUP_REGION) && (CONFIG_NV_SUPPORT_BACKUP_REGION == NV_YES)
    res = nv_read_partition_addr(PARTITION_NV_DATA_BACKUP, &kv_backup_start_addr, &kv_backup_size);
    if (res != ERRCODE_SUCC) {
        return res;
    }
#endif
#endif
    if (kv_start_addr != 0 && kv_size != 0) {
        /* NOTE(review): kv_size read from the partition table is only used in the check
         * above; the region is initialized with the compile-time KV_STORE_DATA_SIZE —
         * confirm this is intentional and the partition size cannot legitimately differ */
        kv_nvregion_init(kv_start_addr, KV_STORE_DATA_SIZE, kv_backup_start_addr, kv_backup_size);
    } else {
        kv_nvregion_init(KV_STORE_START_ADDR, KV_STORE_DATA_SIZE, KV_BACKUP_START_ADDR, KV_BACKUP_DATA_SIZE);
    }

    /* Scan NV region looking for KV pages */
    res = kv_nvregion_scan();
    if (res != ERRCODE_SUCC) {
        return res;
    }

    /* Check expected KV pages have been found */
    for (uint8_t store = (uint8_t)KV_STORE_APPLICATION; store < (uint8_t)KV_STORE_MAX_NUM; store++) {
        uint16_t store_id = kv_store_get_id(store);
        uint8_t pages_num = kv_store_get_page_count(store);
        for (uint8_t page_index = 0; page_index < pages_num; page_index++) {
            res = kv_nvregion_find_page(store_id, page_index, NULL, NULL);
            if (res != ERRCODE_SUCC) {
                /* NV Region does not contain an expected KV page, attempt to create it */
                res = kv_nvregion_create_page(store_id, page_index);
            }
            if (res != ERRCODE_SUCC) {
                return ERRCODE_FAIL;
            }
        }
    }
    return ERRCODE_SUCC;
}
1781
1782 #if (CONFIG_NV_SUPPORT_BACKUP_RESTORE == NV_YES)
kv_updata_backup_page_head_is_valid(const kv_page_header_t * backup_head)1783 STATIC errcode_t kv_updata_backup_page_head_is_valid(const kv_page_header_t *backup_head)
1784 {
1785 if ((backup_head->details.store_id == KV_STORE_ID_BACKUP) &&
1786 ((~backup_head->inverted_details_word) == *(uint32_t *)(uintptr_t)(&backup_head->details)) &&
1787 ((~backup_head->inverted_sequence_number) == backup_head->sequence_number)) {
1788 return ERRCODE_SUCC;
1789 }
1790 return ERRCODE_FAIL;
1791 }
1792
/*
 * Recovers a backup page that may have lost data due to a power failure during backup.
 * back_maybe_need_process_location: flash address of the backup page whose header was
 * found uninitialized at boot.
 * Returns ERRCODE_SUCC on success (including the no-recovery-needed case).
 */
STATIC errcode_t kv_updata_backup_fail_process(uint32_t back_maybe_need_process_location)
{
    errcode_t res;
    kv_page_location unused_page_location = NULL;
    uint32_t page_head_size = (uint32_t)sizeof(kv_page_header_t);
    kv_nvregion_area_t* nvregion_area = nv_get_region_area();
    if (nvregion_area == NULL) {
        return ERRCODE_FAIL;
    }
    /* Find the page in the working region that can be swapped, and return its address */
    res = kv_nvregion_find_unused_page(&unused_page_location);
    if (res != ERRCODE_SUCC) {
        return res;
    }
    /* Read the page header of the working-region swap page */
    kv_page_header_t store_head_buffer;
    res = kv_key_helper_copy_flash((uint32_t)(uintptr_t)&store_head_buffer,
                                   (uint32_t)(uintptr_t)unused_page_location, (uint16_t)page_head_size);
    if (res != ERRCODE_SUCC) {
        return res;
    }
    /* If the header carries KV_STORE_ID_BACKUP and maps to the same backup page as
     * back_maybe_need_process_location, a power loss interrupted the backup and data was
     * lost: erase the backup page and rebuild it from the working-region page. */
    if (kv_updata_backup_page_head_is_valid(&store_head_buffer) == ERRCODE_SUCC) {
        uint32_t back_page_location = nvregion_area->nv_backup_addr +
                                      store_head_buffer.details.page_index * KV_PAGE_SIZE;
        if (back_page_location == back_maybe_need_process_location) {
            res = kv_key_erase_flash(back_page_location, KV_PAGE_SIZE);
            if (res != ERRCODE_SUCC) {
                return res;
            }
            res = kv_backup_copy_unused_page_to_dragpage(back_page_location, (uint32_t)(uintptr_t)unused_page_location);
            if (res != ERRCODE_SUCC) {
                return res;
            }
        }
    }
    return ERRCODE_SUCC;
}
1831
kv_update_backup_init(void)1832 errcode_t kv_update_backup_init(void)
1833 {
1834 /* Initializes the page header of the backup area */
1835 errcode_t res;
1836 uint32_t backup_not_inited_count = 0; /* 重启时备份区没有初始化的数量为0 */
1837 uint32_t back_maybe_need_process_location = 0;
1838 kv_nvregion_area_t* nvregion_area = nv_get_region_area();
1839 if (nvregion_area == NULL) {
1840 return ERRCODE_FAIL;
1841 }
1842 uint32_t back_page_location = nvregion_area->nv_backup_addr;
1843 kv_page_header_t backup_head_buffer;
1844 for (uint32_t page_index = 0; page_index < KV_BACKUP_PAGE_NUM; page_index++) {
1845 res = kv_key_helper_copy_flash((uint32_t)(uintptr_t)&backup_head_buffer,
1846 (uint32_t)back_page_location, sizeof(kv_page_header_t));
1847 if (res != ERRCODE_SUCC) {
1848 return res;
1849 }
1850 if (kv_updata_backup_page_head_is_valid(&backup_head_buffer) == ERRCODE_FAIL) {
1851 backup_not_inited_count++;
1852 res = kv_nvregion_write_page((kv_page_location)(uintptr_t)back_page_location,
1853 KV_STORE_ID_BACKUP, (uint8_t)page_index);
1854 if (res != ERRCODE_SUCC) {
1855 return res;
1856 }
1857 back_maybe_need_process_location = back_page_location;
1858 }
1859 back_page_location += KV_PAGE_SIZE;
1860 }
1861
1862 /* 如果备份区没有初始化的页的数量为0, 说明备份区页不存在异常,正常重启 */
1863 /* 如果备份区未初始化的页的数量大于1, 说明是刚刚烧录状态,对备份区页头的初始化 */
1864 /* 如果备份区没有初始化的页的数量为1, 那么说明可能存在掉电异常场景进行处理 */
1865 if (backup_not_inited_count == 1) {
1866 res = kv_updata_backup_fail_process(back_maybe_need_process_location);
1867 if (res != ERRCODE_SUCC) {
1868 return res;
1869 }
1870 }
1871 return res;
1872 }
1873 #endif /* #if (CONFIG_NV_SUPPORT_BACKUP_RESTORE == NV_YES) */
1874
1875
1876 /* Top level function used to mark a single key in a store as invalid (and hence erased) */
1877 /* Will actually scan a store for all keys matching the specified key_id, to ensure they are all marked as erased */
/*
 * core: the KV store to operate on.
 * sanitised_tasks: flash task describing the key to erase; state_code is updated in place.
 * Returns ERRCODE_SUCC, or (single-core builds) the task's final error code.
 */
errcode_t kv_update_erase_key(kv_store_t core, flash_task_node *sanitised_tasks)
{
    /* Only configure and queue a new state machine when none is in flight;
     * otherwise this call resumes the previously suspended machine. */
    if (!active_state_machine()) {
        g_current_store = core;

        /* Configure search filter to obtain all valid instances of a specific key_id */
        g_search_filter.pattern = sanitised_tasks->data.kv_erase.key;
        g_search_filter.mask = 0xFFFF;
        g_search_filter.state = KV_KEY_FILTER_STATE_VALID;
        g_search_filter.type = KV_KEY_FILTER_TYPE_ANY;
        g_search_filter.location = 0;

        /* Queue Erase Keys state machine for processing */
        begin_state_machine(&g_erase_keys_machine);
    }

    /* Run state machine until it is either suspended (yielding due to flash contention) or */
    /* completed (either successfully or otherwise) */
    sanitised_tasks->state_code = determine_flash_task_state_code(process_state_machine());
#ifdef CONFIG_NV_SUPPORT_SINGLE_CORE_SYSTEM
    /* Single-core systems complete the whole operation in one call: report the final result */
    if (sanitised_tasks->state_code == FLASH_TASK_COMPLETED) {
        return ERRCODE_SUCC;
    } else {
        return (errcode_t)sanitised_tasks->state_code;
    }
#else
    /* Multi-core systems report progress via state_code; the call itself always succeeds */
    return ERRCODE_SUCC;
#endif
}
1907
1908 /* Top level function to write a new key or modify an existing key-value */
1909 /* Will cause a new instance of the key to be generated */
/*
 * core: the KV store to operate on.
 * sanitised_task: flash task describing the key/value to write; state_code is updated in place.
 * Returns ERRCODE_SUCC, or (single-core builds) the task's final error code.
 */
errcode_t kv_update_write_key(kv_store_t core, flash_task_node *sanitised_task)
{
    /* Only configure and queue a new state machine when none is in flight;
     * otherwise this call resumes the previously suspended machine. */
    if (!active_state_machine()) {
        g_current_store = core;

        /* Log details of new key to be written */
        g_new_key_details.key_id = sanitised_task->data.kv.key;
        g_new_key_details.kvalue = sanitised_task->data.kv.kvalue;
        g_new_key_details.kvalue_length = sanitised_task->data.kv.kvalue_length;
        g_new_key_details.attributes = sanitised_task->data.kv.attribute;
        /* (sic: "focre_write" is the actual field name in kv_key_details_t) */
        g_new_key_details.focre_write = sanitised_task->data.kv.force_write;

        /* Queue Write Key state machine for processing */
        begin_state_machine(&g_write_key_machine);
    }

    /* Run state machine until it is either suspended (yielding due to flash contention) or */
    /* completed (either successfully or otherwise) */
    sanitised_task->state_code = determine_flash_task_state_code(process_state_machine());
#ifdef CONFIG_NV_SUPPORT_SINGLE_CORE_SYSTEM
    /* Single-core systems complete the whole operation in one call: report the final result */
    if (sanitised_task->state_code == FLASH_TASK_COMPLETED) {
        return ERRCODE_SUCC;
    } else {
        return (errcode_t)sanitised_task->state_code;
    }
#else
    /* Multi-core systems report progress via state_code; the call itself always succeeds */
    return ERRCODE_SUCC;
#endif
}
1939
1940 /* Modify the attribute on an existing key-value */
1941 /* Top level function to modify the attributes of an existing key */
1942 /* Will cause a new instance of the key to be generated */
1943 /* Existing key attributes will be maintained */
/*
 * core: the KV store to operate on.
 * sanitised_task: flash task carrying the key id and the new attributes; state_code is
 * updated in place.
 * Reads back the key's current value so the rewrite preserves it, then reuses the
 * write-key state machine.
 * Returns ERRCODE_SUCC, or an error from reading the current key, or (single-core
 * builds) the task's final error code.
 */
errcode_t kv_update_modify_attribute(kv_store_t core, flash_task_node *sanitised_task)
{
    uint8_t *old_kvalue = NULL;
    errcode_t res;
    if (!active_state_machine()) {
        g_current_store = core;
        /* Log details of *additional* attributes to be applied to an existing key */
        g_new_key_details.key_id = sanitised_task->data.kv_attribute.key;
        g_new_key_details.kvalue = NULL;
        g_new_key_details.kvalue_length = 0;
        g_new_key_details.attributes = sanitised_task->data.kv_attribute.attribute;
        g_new_key_details.focre_write = false;

        /* Fetch the current value so the attribute change rewrites the same data */
        res = kv_update_helper_get_current_key(&old_kvalue, &g_new_key_details.kvalue_length);
        if (res != ERRCODE_SUCC) {
            return res;
        }
        g_new_key_details.kvalue = old_kvalue;
        /* Queue Modify Key state machine for processing */
        begin_state_machine(&g_write_key_machine);
    }

    /* Run state machine until it is either suspended (yielding due to flash contention) or */
    /* completed (either successfully or otherwise) */
    sanitised_task->state_code = determine_flash_task_state_code(process_state_machine());
#ifdef CONFIG_NV_SUPPORT_SINGLE_CORE_SYSTEM
    kv_free((void *)old_kvalue);
    if (sanitised_task->state_code == FLASH_TASK_COMPLETED) {
        return ERRCODE_SUCC;
    } else {
        return (errcode_t)sanitised_task->state_code;
    }
#else
    /* NOTE(review): on a resume call (state machine already active), old_kvalue is NULL
     * while g_new_key_details.kvalue still holds the buffer allocated on the first call;
     * when the task completes on such a call nothing here frees it — confirm it is
     * released elsewhere, otherwise this leaks on the multi-core resume path. */
    if (sanitised_task->state_code != FLASH_TASK_BEING_PROCESSED) {
        kv_free((void *)old_kvalue);
    }
    return ERRCODE_SUCC;
#endif
}
1983
1984