This source file includes following definitions.
- _zend_is_inconsistent
- zend_hash_check_size
- zend_hash_real_init_ex
- zend_hash_check_init
- _zend_hash_init
- zend_hash_packed_grow
- zend_hash_real_init
- zend_hash_packed_to_hash
- zend_hash_to_packed
- _zend_hash_init_ex
- zend_hash_extend
- zend_array_recalc_elements
- zend_array_count
- zend_hash_set_apply_protection
- zend_hash_iterator_add
- zend_hash_iterator_pos
- zend_hash_iterator_pos_ex
- zend_hash_iterator_del
- _zend_hash_iterators_remove
- zend_hash_iterators_remove
- zend_hash_iterators_lower_pos
- _zend_hash_iterators_update
- zend_hash_find_bucket
- zend_hash_str_find_bucket
- zend_hash_index_find_bucket
- _zend_hash_add_or_update_i
- _zend_hash_add_or_update
- _zend_hash_add
- _zend_hash_update
- _zend_hash_update_ind
- _zend_hash_add_new
- _zend_hash_str_add_or_update
- _zend_hash_str_update
- _zend_hash_str_update_ind
- _zend_hash_str_add
- _zend_hash_str_add_new
- zend_hash_index_add_empty_element
- zend_hash_add_empty_element
- zend_hash_str_add_empty_element
- _zend_hash_index_add_or_update_i
- _zend_hash_index_add_or_update
- _zend_hash_index_add
- _zend_hash_index_add_new
- _zend_hash_index_update
- _zend_hash_next_index_insert
- _zend_hash_next_index_insert_new
- zend_hash_do_resize
- zend_hash_rehash
- _zend_hash_del_el_ex
- _zend_hash_del_el
- zend_hash_del_bucket
- zend_hash_del
- zend_hash_del_ind
- zend_hash_str_del_ind
- zend_hash_str_del
- zend_hash_index_del
- zend_hash_destroy
- zend_array_destroy
- zend_hash_clean
- zend_symtable_clean
- zend_hash_graceful_destroy
- zend_hash_graceful_reverse_destroy
- zend_hash_apply
- zend_hash_apply_with_argument
- zend_hash_apply_with_arguments
- zend_hash_reverse_apply
- zend_hash_copy
- zend_array_dup_element
- zend_array_dup_packed_elements
- zend_array_dup_elements
- zend_array_dup
- _zend_hash_merge
- zend_hash_replace_checker_wrapper
- zend_hash_merge_ex
- zend_hash_find
- zend_hash_str_find
- zend_hash_exists
- zend_hash_str_exists
- zend_hash_index_find
- zend_hash_index_exists
- zend_hash_internal_pointer_reset_ex
- zend_hash_internal_pointer_end_ex
- zend_hash_move_forward_ex
- zend_hash_move_backwards_ex
- zend_hash_get_current_key_ex
- zend_hash_get_current_key_zval_ex
- zend_hash_get_current_key_type_ex
- zend_hash_get_current_data_ex
- zend_hash_bucket_swap
- zend_hash_bucket_renum_swap
- zend_hash_bucket_packed_swap
- zend_hash_sort_ex
- zend_hash_compare_impl
- zend_hash_compare
- zend_hash_minmax
- _zend_handle_numeric_str_ex
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23 #include "zend.h"
24 #include "zend_globals.h"
25 #include "zend_variables.h"
26
27 #define HT_DEBUG 0
28 #if HT_DEBUG
29 # define HT_ASSERT(c) ZEND_ASSERT(c)
30 #else
31 # define HT_ASSERT(c)
32 #endif
33
34 #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
35
36 #if ZEND_DEBUG
37
38
39
40 #define HT_OK 0x00
41 #define HT_IS_DESTROYING 0x40
42 #define HT_DESTROYED 0x80
43 #define HT_CLEANING 0xc0
44
45 static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
46 {
47 if ((ht->u.flags & HASH_MASK_CONSISTENCY) == HT_OK) {
48 return;
49 }
50 switch ((ht->u.flags & HASH_MASK_CONSISTENCY)) {
51 case HT_IS_DESTROYING:
52 zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
53 break;
54 case HT_DESTROYED:
55 zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
56 break;
57 case HT_CLEANING:
58 zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
59 break;
60 default:
61 zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
62 break;
63 }
64 zend_bailout();
65 }
66 #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
67 #define SET_INCONSISTENT(n) do { \
68 (ht)->u.flags |= n; \
69 } while (0)
70 #else
71 #define IS_CONSISTENT(a)
72 #define SET_INCONSISTENT(n)
73 #endif
74
75 #define HASH_PROTECT_RECURSION(ht) \
76 if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) { \
77 if (((ht)->u.flags & ZEND_HASH_APPLY_COUNT_MASK) >= (3 << 8)) { \
78 zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");\
79 } \
80 ZEND_HASH_INC_APPLY_COUNT(ht); \
81 }
82
83 #define HASH_UNPROTECT_RECURSION(ht) \
84 if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) { \
85 ZEND_HASH_DEC_APPLY_COUNT(ht); \
86 }
87
88 #define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
89 if ((ht)->nNumUsed >= (ht)->nTableSize) { \
90 zend_hash_do_resize(ht); \
91 }
92
93 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
94
95 static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
96 {
97 #if defined(ZEND_WIN32)
98 unsigned long index;
99 #endif
100
101
102
103 if (nSize < HT_MIN_SIZE) {
104 nSize = HT_MIN_SIZE;
105 } else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
106 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
107 }
108
109 #if defined(ZEND_WIN32)
110 if (BitScanReverse(&index, nSize - 1)) {
111 return 0x2 << ((31 - index) ^ 0x1f);
112 } else {
113
114
115 return nSize;
116 }
117 #elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
118 return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
119 #else
120 nSize -= 1;
121 nSize |= (nSize >> 1);
122 nSize |= (nSize >> 2);
123 nSize |= (nSize >> 4);
124 nSize |= (nSize >> 8);
125 nSize |= (nSize >> 16);
126 return nSize + 1;
127 #endif
128 }
129
130 static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
131 {
132 HT_ASSERT(GC_REFCOUNT(ht) == 1);
133 ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
134 if (packed) {
135 HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
136 (ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
137 HT_HASH_RESET_PACKED(ht);
138 } else {
139 (ht)->nTableMask = -(ht)->nTableSize;
140 HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
141 (ht)->u.flags |= HASH_FLAG_INITIALIZED;
142 if (EXPECTED(ht->nTableMask == -8)) {
143 Bucket *arData = ht->arData;
144
145 HT_HASH_EX(arData, -8) = -1;
146 HT_HASH_EX(arData, -7) = -1;
147 HT_HASH_EX(arData, -6) = -1;
148 HT_HASH_EX(arData, -5) = -1;
149 HT_HASH_EX(arData, -4) = -1;
150 HT_HASH_EX(arData, -3) = -1;
151 HT_HASH_EX(arData, -2) = -1;
152 HT_HASH_EX(arData, -1) = -1;
153 } else {
154 HT_HASH_RESET(ht);
155 }
156 }
157 }
158
159 static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
160 {
161 HT_ASSERT(GC_REFCOUNT(ht) == 1);
162 if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
163 zend_hash_real_init_ex(ht, packed);
164 }
165 }
166
167 #define CHECK_INIT(ht, packed) \
168 zend_hash_check_init(ht, packed)
169
170 static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
171 {HT_INVALID_IDX, HT_INVALID_IDX};
172
173 ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
174 {
175 GC_REFCOUNT(ht) = 1;
176 GC_TYPE_INFO(ht) = IS_ARRAY;
177 ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
178 ht->nTableSize = zend_hash_check_size(nSize);
179 ht->nTableMask = HT_MIN_MASK;
180 HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
181 ht->nNumUsed = 0;
182 ht->nNumOfElements = 0;
183 ht->nInternalPointer = HT_INVALID_IDX;
184 ht->nNextFreeElement = 0;
185 ht->pDestructor = pDestructor;
186 }
187
188 static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
189 {
190 HT_ASSERT(GC_REFCOUNT(ht) == 1);
191 if (ht->nTableSize >= HT_MAX_SIZE) {
192 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
193 }
194 HANDLE_BLOCK_INTERRUPTIONS();
195 ht->nTableSize += ht->nTableSize;
196 HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
197 HANDLE_UNBLOCK_INTERRUPTIONS();
198 }
199
200 ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
201 {
202 IS_CONSISTENT(ht);
203
204 HT_ASSERT(GC_REFCOUNT(ht) == 1);
205 zend_hash_real_init_ex(ht, packed);
206 }
207
208 ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
209 {
210 void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
211 Bucket *old_buckets = ht->arData;
212
213 HT_ASSERT(GC_REFCOUNT(ht) == 1);
214 HANDLE_BLOCK_INTERRUPTIONS();
215 ht->u.flags &= ~HASH_FLAG_PACKED;
216 new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
217 ht->nTableMask = -ht->nTableSize;
218 HT_SET_DATA_ADDR(ht, new_data);
219 memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
220 pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
221 zend_hash_rehash(ht);
222 HANDLE_UNBLOCK_INTERRUPTIONS();
223 }
224
225 ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
226 {
227 void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
228 Bucket *old_buckets = ht->arData;
229
230 HT_ASSERT(GC_REFCOUNT(ht) == 1);
231 HANDLE_BLOCK_INTERRUPTIONS();
232 new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
233 ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
234 ht->nTableMask = HT_MIN_MASK;
235 HT_SET_DATA_ADDR(ht, new_data);
236 HT_HASH_RESET_PACKED(ht);
237 memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
238 pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
239 HANDLE_UNBLOCK_INTERRUPTIONS();
240 }
241
242 ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
243 {
244 _zend_hash_init(ht, nSize, pDestructor, persistent ZEND_FILE_LINE_RELAY_CC);
245 if (!bApplyProtection) {
246 ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
247 }
248 }
249
250 ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
251 {
252 HT_ASSERT(GC_REFCOUNT(ht) == 1);
253 if (nSize == 0) return;
254 if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
255 if (nSize > ht->nTableSize) {
256 ht->nTableSize = zend_hash_check_size(nSize);
257 }
258 zend_hash_check_init(ht, packed);
259 } else {
260 if (packed) {
261 ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
262 if (nSize > ht->nTableSize) {
263 HANDLE_BLOCK_INTERRUPTIONS();
264 ht->nTableSize = zend_hash_check_size(nSize);
265 HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
266 HANDLE_UNBLOCK_INTERRUPTIONS();
267 }
268 } else {
269 ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
270 if (nSize > ht->nTableSize) {
271 void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
272 Bucket *old_buckets = ht->arData;
273 nSize = zend_hash_check_size(nSize);
274 HANDLE_BLOCK_INTERRUPTIONS();
275 new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
276 ht->nTableSize = nSize;
277 ht->nTableMask = -ht->nTableSize;
278 HT_SET_DATA_ADDR(ht, new_data);
279 memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
280 pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
281 zend_hash_rehash(ht);
282 HANDLE_UNBLOCK_INTERRUPTIONS();
283 }
284 }
285 }
286 }
287
288 static uint32_t zend_array_recalc_elements(HashTable *ht)
289 {
290 zval *val;
291 uint32_t num = ht->nNumOfElements;
292
293 ZEND_HASH_FOREACH_VAL(ht, val) {
294 if (Z_TYPE_P(val) == IS_UNDEF) continue;
295 if (Z_TYPE_P(val) == IS_INDIRECT) {
296 if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
297 num--;
298 }
299 }
300 } ZEND_HASH_FOREACH_END();
301 return num;
302 }
303
304
305 ZEND_API uint32_t zend_array_count(HashTable *ht)
306 {
307 uint32_t num;
308 if (UNEXPECTED(ht->u.v.flags & HASH_FLAG_HAS_EMPTY_IND)) {
309 num = zend_array_recalc_elements(ht);
310 if (UNEXPECTED(ht->nNumOfElements == num)) {
311 ht->u.v.flags &= ~HASH_FLAG_HAS_EMPTY_IND;
312 }
313 } else if (UNEXPECTED(ht == &EG(symbol_table))) {
314 num = zend_array_recalc_elements(ht);
315 } else {
316 num = zend_hash_num_elements(ht);
317 }
318 return num;
319 }
320
321
322 ZEND_API void ZEND_FASTCALL zend_hash_set_apply_protection(HashTable *ht, zend_bool bApplyProtection)
323 {
324 if (bApplyProtection) {
325 ht->u.flags |= HASH_FLAG_APPLY_PROTECTION;
326 } else {
327 ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
328 }
329 }
330
331 ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
332 {
333 HashTableIterator *iter = EG(ht_iterators);
334 HashTableIterator *end = iter + EG(ht_iterators_count);
335 uint32_t idx;
336
337 if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
338 ht->u.v.nIteratorsCount++;
339 }
340 while (iter != end) {
341 if (iter->ht == NULL) {
342 iter->ht = ht;
343 iter->pos = pos;
344 idx = iter - EG(ht_iterators);
345 if (idx + 1 > EG(ht_iterators_used)) {
346 EG(ht_iterators_used) = idx + 1;
347 }
348 return idx;
349 }
350 iter++;
351 }
352 if (EG(ht_iterators) == EG(ht_iterators_slots)) {
353 EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
354 memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
355 } else {
356 EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
357 }
358 iter = EG(ht_iterators) + EG(ht_iterators_count);
359 EG(ht_iterators_count) += 8;
360 iter->ht = ht;
361 iter->pos = pos;
362 memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
363 idx = iter - EG(ht_iterators);
364 EG(ht_iterators_used) = idx + 1;
365 return idx;
366 }
367
368 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
369 {
370 HashTableIterator *iter = EG(ht_iterators) + idx;
371
372 ZEND_ASSERT(idx != (uint32_t)-1);
373 if (iter->pos == HT_INVALID_IDX) {
374 return HT_INVALID_IDX;
375 } else if (UNEXPECTED(iter->ht != ht)) {
376 if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
377 && EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
378 iter->ht->u.v.nIteratorsCount--;
379 }
380 if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
381 ht->u.v.nIteratorsCount++;
382 }
383 iter->ht = ht;
384 iter->pos = ht->nInternalPointer;
385 }
386 return iter->pos;
387 }
388
389 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
390 {
391 HashTable *ht = Z_ARRVAL_P(array);
392 HashTableIterator *iter = EG(ht_iterators) + idx;
393
394 ZEND_ASSERT(idx != (uint32_t)-1);
395 if (iter->pos == HT_INVALID_IDX) {
396 return HT_INVALID_IDX;
397 } else if (UNEXPECTED(iter->ht != ht)) {
398 if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
399 && EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
400 iter->ht->u.v.nIteratorsCount--;
401 }
402 SEPARATE_ARRAY(array);
403 ht = Z_ARRVAL_P(array);
404 if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
405 ht->u.v.nIteratorsCount++;
406 }
407 iter->ht = ht;
408 iter->pos = ht->nInternalPointer;
409 }
410 return iter->pos;
411 }
412
413 ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
414 {
415 HashTableIterator *iter = EG(ht_iterators) + idx;
416
417 ZEND_ASSERT(idx != (uint32_t)-1);
418
419 if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
420 && EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
421 iter->ht->u.v.nIteratorsCount--;
422 }
423 iter->ht = NULL;
424
425 if (idx == EG(ht_iterators_used) - 1) {
426 while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
427 idx--;
428 }
429 EG(ht_iterators_used) = idx;
430 }
431 }
432
433 static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
434 {
435 HashTableIterator *iter = EG(ht_iterators);
436 HashTableIterator *end = iter + EG(ht_iterators_used);
437
438 while (iter != end) {
439 if (iter->ht == ht) {
440 iter->ht = HT_POISONED_PTR;
441 }
442 iter++;
443 }
444 }
445
446 static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
447 {
448 if (UNEXPECTED(ht->u.v.nIteratorsCount)) {
449 _zend_hash_iterators_remove(ht);
450 }
451 }
452
453 ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
454 {
455 HashTableIterator *iter = EG(ht_iterators);
456 HashTableIterator *end = iter + EG(ht_iterators_used);
457 HashPosition res = HT_INVALID_IDX;
458
459 while (iter != end) {
460 if (iter->ht == ht) {
461 if (iter->pos >= start && iter->pos < res) {
462 res = iter->pos;
463 }
464 }
465 iter++;
466 }
467 return res;
468 }
469
470 ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
471 {
472 HashTableIterator *iter = EG(ht_iterators);
473 HashTableIterator *end = iter + EG(ht_iterators_used);
474
475 while (iter != end) {
476 if (iter->ht == ht && iter->pos == from) {
477 iter->pos = to;
478 }
479 iter++;
480 }
481 }
482
483 static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key)
484 {
485 zend_ulong h;
486 uint32_t nIndex;
487 uint32_t idx;
488 Bucket *p, *arData;
489
490 h = zend_string_hash_val(key);
491 arData = ht->arData;
492 nIndex = h | ht->nTableMask;
493 idx = HT_HASH_EX(arData, nIndex);
494 while (EXPECTED(idx != HT_INVALID_IDX)) {
495 p = HT_HASH_TO_BUCKET_EX(arData, idx);
496 if (EXPECTED(p->key == key)) {
497 return p;
498 } else if (EXPECTED(p->h == h) &&
499 EXPECTED(p->key) &&
500 EXPECTED(ZSTR_LEN(p->key) == ZSTR_LEN(key)) &&
501 EXPECTED(memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
502 return p;
503 }
504 idx = Z_NEXT(p->val);
505 }
506 return NULL;
507 }
508
509 static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
510 {
511 uint32_t nIndex;
512 uint32_t idx;
513 Bucket *p, *arData;
514
515 arData = ht->arData;
516 nIndex = h | ht->nTableMask;
517 idx = HT_HASH_EX(arData, nIndex);
518 while (idx != HT_INVALID_IDX) {
519 ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
520 p = HT_HASH_TO_BUCKET_EX(arData, idx);
521 if ((p->h == h)
522 && p->key
523 && (ZSTR_LEN(p->key) == len)
524 && !memcmp(ZSTR_VAL(p->key), str, len)) {
525 return p;
526 }
527 idx = Z_NEXT(p->val);
528 }
529 return NULL;
530 }
531
532 static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
533 {
534 uint32_t nIndex;
535 uint32_t idx;
536 Bucket *p, *arData;
537
538 arData = ht->arData;
539 nIndex = h | ht->nTableMask;
540 idx = HT_HASH_EX(arData, nIndex);
541 while (idx != HT_INVALID_IDX) {
542 ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
543 p = HT_HASH_TO_BUCKET_EX(arData, idx);
544 if (p->h == h && !p->key) {
545 return p;
546 }
547 idx = Z_NEXT(p->val);
548 }
549 return NULL;
550 }
551
552 static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
553 {
554 zend_ulong h;
555 uint32_t nIndex;
556 uint32_t idx;
557 Bucket *p;
558
559 IS_CONSISTENT(ht);
560 HT_ASSERT(GC_REFCOUNT(ht) == 1);
561
562 if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
563 CHECK_INIT(ht, 0);
564 goto add_to_hash;
565 } else if (ht->u.flags & HASH_FLAG_PACKED) {
566 zend_hash_packed_to_hash(ht);
567 } else if ((flag & HASH_ADD_NEW) == 0) {
568 p = zend_hash_find_bucket(ht, key);
569
570 if (p) {
571 zval *data;
572
573 if (flag & HASH_ADD) {
574 if (!(flag & HASH_UPDATE_INDIRECT)) {
575 return NULL;
576 }
577 ZEND_ASSERT(&p->val != pData);
578 data = &p->val;
579 if (Z_TYPE_P(data) == IS_INDIRECT) {
580 data = Z_INDIRECT_P(data);
581 if (Z_TYPE_P(data) != IS_UNDEF) {
582 return NULL;
583 }
584 } else {
585 return NULL;
586 }
587 } else {
588 ZEND_ASSERT(&p->val != pData);
589 data = &p->val;
590 if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
591 data = Z_INDIRECT_P(data);
592 }
593 }
594 HANDLE_BLOCK_INTERRUPTIONS();
595 if (ht->pDestructor) {
596 ht->pDestructor(data);
597 }
598 ZVAL_COPY_VALUE(data, pData);
599 HANDLE_UNBLOCK_INTERRUPTIONS();
600 return data;
601 }
602 }
603
604 ZEND_HASH_IF_FULL_DO_RESIZE(ht);
605
606 add_to_hash:
607 HANDLE_BLOCK_INTERRUPTIONS();
608 idx = ht->nNumUsed++;
609 ht->nNumOfElements++;
610 if (ht->nInternalPointer == HT_INVALID_IDX) {
611 ht->nInternalPointer = idx;
612 }
613 zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
614 p = ht->arData + idx;
615 p->key = key;
616 if (!ZSTR_IS_INTERNED(key)) {
617 zend_string_addref(key);
618 ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
619 zend_string_hash_val(key);
620 }
621 p->h = h = ZSTR_H(key);
622 ZVAL_COPY_VALUE(&p->val, pData);
623 nIndex = h | ht->nTableMask;
624 Z_NEXT(p->val) = HT_HASH(ht, nIndex);
625 HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
626 HANDLE_UNBLOCK_INTERRUPTIONS();
627
628 return &p->val;
629 }
630
631 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
632 {
633 return _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
634 }
635
636 ZEND_API zval* ZEND_FASTCALL _zend_hash_add(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
637 {
638 return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
639 }
640
641 ZEND_API zval* ZEND_FASTCALL _zend_hash_update(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
642 {
643 return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
644 }
645
646 ZEND_API zval* ZEND_FASTCALL _zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
647 {
648 return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
649 }
650
651 ZEND_API zval* ZEND_FASTCALL _zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
652 {
653 return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
654 }
655
656 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
657 {
658 zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
659 zval *ret = _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
660 zend_string_release(key);
661 return ret;
662 }
663
664 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
665 {
666 zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
667 zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
668 zend_string_release(key);
669 return ret;
670 }
671
672 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
673 {
674 zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
675 zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
676 zend_string_release(key);
677 return ret;
678 }
679
680 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
681 {
682 zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
683 zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
684 zend_string_release(key);
685 return ret;
686 }
687
688 ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
689 {
690 zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
691 zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
692 zend_string_delref(key);
693 return ret;
694 }
695
696 ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
697 {
698 zval dummy;
699
700 ZVAL_NULL(&dummy);
701 return zend_hash_index_add(ht, h, &dummy);
702 }
703
704 ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
705 {
706 zval dummy;
707
708 ZVAL_NULL(&dummy);
709 return zend_hash_add(ht, key, &dummy);
710 }
711
712 ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
713 {
714 zval dummy;
715
716 ZVAL_NULL(&dummy);
717 return zend_hash_str_add(ht, str, len, &dummy);
718 }
719
720 static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
721 {
722 uint32_t nIndex;
723 uint32_t idx;
724 Bucket *p;
725
726 IS_CONSISTENT(ht);
727 HT_ASSERT(GC_REFCOUNT(ht) == 1);
728
729 if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
730 CHECK_INIT(ht, h < ht->nTableSize);
731 if (h < ht->nTableSize) {
732 p = ht->arData + h;
733 goto add_to_packed;
734 }
735 goto add_to_hash;
736 } else if (ht->u.flags & HASH_FLAG_PACKED) {
737 if (h < ht->nNumUsed) {
738 p = ht->arData + h;
739 if (Z_TYPE(p->val) != IS_UNDEF) {
740 if (flag & HASH_ADD) {
741 return NULL;
742 }
743 if (ht->pDestructor) {
744 ht->pDestructor(&p->val);
745 }
746 ZVAL_COPY_VALUE(&p->val, pData);
747 if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
748 ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
749 }
750 return &p->val;
751 } else {
752 goto convert_to_hash;
753 }
754 } else if (EXPECTED(h < ht->nTableSize)) {
755 p = ht->arData + h;
756 } else if ((h >> 1) < ht->nTableSize &&
757 (ht->nTableSize >> 1) < ht->nNumOfElements) {
758 zend_hash_packed_grow(ht);
759 p = ht->arData + h;
760 } else {
761 goto convert_to_hash;
762 }
763
764 add_to_packed:
765 HANDLE_BLOCK_INTERRUPTIONS();
766
767 if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
768 ht->nNumUsed = h + 1;
769 } else if (h >= ht->nNumUsed) {
770 if (h > ht->nNumUsed) {
771 Bucket *q = ht->arData + ht->nNumUsed;
772 while (q != p) {
773 ZVAL_UNDEF(&q->val);
774 q++;
775 }
776 }
777 ht->nNumUsed = h + 1;
778 }
779 ht->nNumOfElements++;
780 if (ht->nInternalPointer == HT_INVALID_IDX) {
781 ht->nInternalPointer = h;
782 }
783 zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
784 if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
785 ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
786 }
787 p->h = h;
788 p->key = NULL;
789 ZVAL_COPY_VALUE(&p->val, pData);
790
791 HANDLE_UNBLOCK_INTERRUPTIONS();
792
793 return &p->val;
794
795 convert_to_hash:
796 zend_hash_packed_to_hash(ht);
797 } else if ((flag & HASH_ADD_NEW) == 0) {
798 p = zend_hash_index_find_bucket(ht, h);
799 if (p) {
800 if (flag & HASH_ADD) {
801 return NULL;
802 }
803 ZEND_ASSERT(&p->val != pData);
804 HANDLE_BLOCK_INTERRUPTIONS();
805 if (ht->pDestructor) {
806 ht->pDestructor(&p->val);
807 }
808 ZVAL_COPY_VALUE(&p->val, pData);
809 HANDLE_UNBLOCK_INTERRUPTIONS();
810 if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
811 ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
812 }
813 return &p->val;
814 }
815 }
816
817 ZEND_HASH_IF_FULL_DO_RESIZE(ht);
818
819 add_to_hash:
820 HANDLE_BLOCK_INTERRUPTIONS();
821 idx = ht->nNumUsed++;
822 ht->nNumOfElements++;
823 if (ht->nInternalPointer == HT_INVALID_IDX) {
824 ht->nInternalPointer = idx;
825 }
826 zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
827 if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
828 ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
829 }
830 p = ht->arData + idx;
831 p->h = h;
832 p->key = NULL;
833 nIndex = h | ht->nTableMask;
834 ZVAL_COPY_VALUE(&p->val, pData);
835 Z_NEXT(p->val) = HT_HASH(ht, nIndex);
836 HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
837 HANDLE_UNBLOCK_INTERRUPTIONS();
838
839 return &p->val;
840 }
841
842 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
843 {
844 return _zend_hash_index_add_or_update_i(ht, h, pData, flag ZEND_FILE_LINE_RELAY_CC);
845 }
846
847 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
848 {
849 return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
850 }
851
852 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
853 {
854 return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
855 }
856
857 ZEND_API zval* ZEND_FASTCALL _zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
858 {
859 return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
860 }
861
862 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
863 {
864 return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
865 }
866
867 ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
868 {
869 return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
870 }
871
872 static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
873 {
874
875 IS_CONSISTENT(ht);
876 HT_ASSERT(GC_REFCOUNT(ht) == 1);
877
878 if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) {
879 HANDLE_BLOCK_INTERRUPTIONS();
880 zend_hash_rehash(ht);
881 HANDLE_UNBLOCK_INTERRUPTIONS();
882 } else if (ht->nTableSize < HT_MAX_SIZE) {
883 void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
884 uint32_t nSize = ht->nTableSize + ht->nTableSize;
885 Bucket *old_buckets = ht->arData;
886
887 HANDLE_BLOCK_INTERRUPTIONS();
888 new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
889 ht->nTableSize = nSize;
890 ht->nTableMask = -ht->nTableSize;
891 HT_SET_DATA_ADDR(ht, new_data);
892 memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
893 pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
894 zend_hash_rehash(ht);
895 HANDLE_UNBLOCK_INTERRUPTIONS();
896 } else {
897 zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
898 }
899 }
900
901 ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
902 {
903 Bucket *p;
904 uint32_t nIndex, i;
905
906 IS_CONSISTENT(ht);
907
908 if (UNEXPECTED(ht->nNumOfElements == 0)) {
909 if (ht->u.flags & HASH_FLAG_INITIALIZED) {
910 ht->nNumUsed = 0;
911 HT_HASH_RESET(ht);
912 }
913 return SUCCESS;
914 }
915
916 HT_HASH_RESET(ht);
917 i = 0;
918 p = ht->arData;
919 if (ht->nNumUsed == ht->nNumOfElements) {
920 do {
921 nIndex = p->h | ht->nTableMask;
922 Z_NEXT(p->val) = HT_HASH(ht, nIndex);
923 HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
924 p++;
925 } while (++i < ht->nNumUsed);
926 } else {
927 do {
928 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
929 uint32_t j = i;
930 Bucket *q = p;
931
932 if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
933 while (++i < ht->nNumUsed) {
934 p++;
935 if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
936 ZVAL_COPY_VALUE(&q->val, &p->val);
937 q->h = p->h;
938 nIndex = q->h | ht->nTableMask;
939 q->key = p->key;
940 Z_NEXT(q->val) = HT_HASH(ht, nIndex);
941 HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
942 if (UNEXPECTED(ht->nInternalPointer == i)) {
943 ht->nInternalPointer = j;
944 }
945 q++;
946 j++;
947 }
948 }
949 } else {
950 uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
951
952 while (++i < ht->nNumUsed) {
953 p++;
954 if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
955 ZVAL_COPY_VALUE(&q->val, &p->val);
956 q->h = p->h;
957 nIndex = q->h | ht->nTableMask;
958 q->key = p->key;
959 Z_NEXT(q->val) = HT_HASH(ht, nIndex);
960 HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
961 if (UNEXPECTED(ht->nInternalPointer == i)) {
962 ht->nInternalPointer = j;
963 }
964 if (UNEXPECTED(i == iter_pos)) {
965 zend_hash_iterators_update(ht, i, j);
966 iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
967 }
968 q++;
969 j++;
970 }
971 }
972 }
973 ht->nNumUsed = j;
974 break;
975 }
976 nIndex = p->h | ht->nTableMask;
977 Z_NEXT(p->val) = HT_HASH(ht, nIndex);
978 HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
979 p++;
980 } while (++i < ht->nNumUsed);
981 }
982 return SUCCESS;
983 }
984
985 static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
986 {
987 HANDLE_BLOCK_INTERRUPTIONS();
988 if (!(ht->u.flags & HASH_FLAG_PACKED)) {
989 if (prev) {
990 Z_NEXT(prev->val) = Z_NEXT(p->val);
991 } else {
992 HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
993 }
994 }
995 if (HT_IDX_TO_HASH(ht->nNumUsed - 1) == idx) {
996 do {
997 ht->nNumUsed--;
998 } while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
999 }
1000 ht->nNumOfElements--;
1001 if (HT_IDX_TO_HASH(ht->nInternalPointer) == idx || UNEXPECTED(ht->u.v.nIteratorsCount)) {
1002 uint32_t new_idx;
1003
1004 new_idx = idx = HT_HASH_TO_IDX(idx);
1005 while (1) {
1006 new_idx++;
1007 if (new_idx >= ht->nNumUsed) {
1008 new_idx = HT_INVALID_IDX;
1009 break;
1010 } else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
1011 break;
1012 }
1013 }
1014 if (ht->nInternalPointer == idx) {
1015 ht->nInternalPointer = new_idx;
1016 }
1017 zend_hash_iterators_update(ht, idx, new_idx);
1018 }
1019 if (p->key) {
1020 zend_string_release(p->key);
1021 }
1022 if (ht->pDestructor) {
1023 zval tmp;
1024 ZVAL_COPY_VALUE(&tmp, &p->val);
1025 ZVAL_UNDEF(&p->val);
1026 ht->pDestructor(&tmp);
1027 } else {
1028 ZVAL_UNDEF(&p->val);
1029 }
1030 HANDLE_UNBLOCK_INTERRUPTIONS();
1031 }
1032
1033 static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
1034 {
1035 Bucket *prev = NULL;
1036
1037 if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1038 uint32_t nIndex = p->h | ht->nTableMask;
1039 uint32_t i = HT_HASH(ht, nIndex);
1040
1041 if (i != idx) {
1042 prev = HT_HASH_TO_BUCKET(ht, i);
1043 while (Z_NEXT(prev->val) != idx) {
1044 i = Z_NEXT(prev->val);
1045 prev = HT_HASH_TO_BUCKET(ht, i);
1046 }
1047 }
1048 }
1049
1050 _zend_hash_del_el_ex(ht, idx, p, prev);
1051 }
1052
1053 ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
1054 {
1055 IS_CONSISTENT(ht);
1056 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1057 _zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
1058 }
1059
1060 ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
1061 {
1062 zend_ulong h;
1063 uint32_t nIndex;
1064 uint32_t idx;
1065 Bucket *p;
1066 Bucket *prev = NULL;
1067
1068 IS_CONSISTENT(ht);
1069 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1070
1071 h = zend_string_hash_val(key);
1072 nIndex = h | ht->nTableMask;
1073
1074 idx = HT_HASH(ht, nIndex);
1075 while (idx != HT_INVALID_IDX) {
1076 p = HT_HASH_TO_BUCKET(ht, idx);
1077 if ((p->key == key) ||
1078 (p->h == h &&
1079 p->key &&
1080 ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1081 memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1082 _zend_hash_del_el_ex(ht, idx, p, prev);
1083 return SUCCESS;
1084 }
1085 prev = p;
1086 idx = Z_NEXT(p->val);
1087 }
1088 return FAILURE;
1089 }
1090
1091 ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
1092 {
1093 zend_ulong h;
1094 uint32_t nIndex;
1095 uint32_t idx;
1096 Bucket *p;
1097 Bucket *prev = NULL;
1098
1099 IS_CONSISTENT(ht);
1100 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1101
1102 h = zend_string_hash_val(key);
1103 nIndex = h | ht->nTableMask;
1104
1105 idx = HT_HASH(ht, nIndex);
1106 while (idx != HT_INVALID_IDX) {
1107 p = HT_HASH_TO_BUCKET(ht, idx);
1108 if ((p->key == key) ||
1109 (p->h == h &&
1110 p->key &&
1111 ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
1112 memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
1113 if (Z_TYPE(p->val) == IS_INDIRECT) {
1114 zval *data = Z_INDIRECT(p->val);
1115
1116 if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1117 return FAILURE;
1118 } else {
1119 if (ht->pDestructor) {
1120 zval tmp;
1121 ZVAL_COPY_VALUE(&tmp, data);
1122 ZVAL_UNDEF(data);
1123 ht->pDestructor(&tmp);
1124 } else {
1125 ZVAL_UNDEF(data);
1126 }
1127 ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1128 }
1129 } else {
1130 _zend_hash_del_el_ex(ht, idx, p, prev);
1131 }
1132 return SUCCESS;
1133 }
1134 prev = p;
1135 idx = Z_NEXT(p->val);
1136 }
1137 return FAILURE;
1138 }
1139
1140 ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
1141 {
1142 zend_ulong h;
1143 uint32_t nIndex;
1144 uint32_t idx;
1145 Bucket *p;
1146 Bucket *prev = NULL;
1147
1148 IS_CONSISTENT(ht);
1149 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1150
1151 h = zend_inline_hash_func(str, len);
1152 nIndex = h | ht->nTableMask;
1153
1154 idx = HT_HASH(ht, nIndex);
1155 while (idx != HT_INVALID_IDX) {
1156 p = HT_HASH_TO_BUCKET(ht, idx);
1157 if ((p->h == h)
1158 && p->key
1159 && (ZSTR_LEN(p->key) == len)
1160 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1161 if (Z_TYPE(p->val) == IS_INDIRECT) {
1162 zval *data = Z_INDIRECT(p->val);
1163
1164 if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1165 return FAILURE;
1166 } else {
1167 if (ht->pDestructor) {
1168 ht->pDestructor(data);
1169 }
1170 ZVAL_UNDEF(data);
1171 ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
1172 }
1173 } else {
1174 _zend_hash_del_el_ex(ht, idx, p, prev);
1175 }
1176 return SUCCESS;
1177 }
1178 prev = p;
1179 idx = Z_NEXT(p->val);
1180 }
1181 return FAILURE;
1182 }
1183
1184 ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
1185 {
1186 zend_ulong h;
1187 uint32_t nIndex;
1188 uint32_t idx;
1189 Bucket *p;
1190 Bucket *prev = NULL;
1191
1192 IS_CONSISTENT(ht);
1193 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1194
1195 h = zend_inline_hash_func(str, len);
1196 nIndex = h | ht->nTableMask;
1197
1198 idx = HT_HASH(ht, nIndex);
1199 while (idx != HT_INVALID_IDX) {
1200 p = HT_HASH_TO_BUCKET(ht, idx);
1201 if ((p->h == h)
1202 && p->key
1203 && (ZSTR_LEN(p->key) == len)
1204 && !memcmp(ZSTR_VAL(p->key), str, len)) {
1205 _zend_hash_del_el_ex(ht, idx, p, prev);
1206 return SUCCESS;
1207 }
1208 prev = p;
1209 idx = Z_NEXT(p->val);
1210 }
1211 return FAILURE;
1212 }
1213
1214 ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
1215 {
1216 uint32_t nIndex;
1217 uint32_t idx;
1218 Bucket *p;
1219 Bucket *prev = NULL;
1220
1221 IS_CONSISTENT(ht);
1222 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1223
1224 if (ht->u.flags & HASH_FLAG_PACKED) {
1225 if (h < ht->nNumUsed) {
1226 p = ht->arData + h;
1227 if (Z_TYPE(p->val) != IS_UNDEF) {
1228 _zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
1229 return SUCCESS;
1230 }
1231 }
1232 return FAILURE;
1233 }
1234 nIndex = h | ht->nTableMask;
1235
1236 idx = HT_HASH(ht, nIndex);
1237 while (idx != HT_INVALID_IDX) {
1238 p = HT_HASH_TO_BUCKET(ht, idx);
1239 if ((p->h == h) && (p->key == NULL)) {
1240 _zend_hash_del_el_ex(ht, idx, p, prev);
1241 return SUCCESS;
1242 }
1243 prev = p;
1244 idx = Z_NEXT(p->val);
1245 }
1246 return FAILURE;
1247 }
1248
1249 ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
1250 {
1251 Bucket *p, *end;
1252
1253 IS_CONSISTENT(ht);
1254 HT_ASSERT(GC_REFCOUNT(ht) <= 1);
1255
1256 if (ht->nNumUsed) {
1257 p = ht->arData;
1258 end = p + ht->nNumUsed;
1259 if (ht->pDestructor) {
1260 SET_INCONSISTENT(HT_IS_DESTROYING);
1261
1262 if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
1263 if (ht->nNumUsed == ht->nNumOfElements) {
1264 do {
1265 ht->pDestructor(&p->val);
1266 } while (++p != end);
1267 } else {
1268 do {
1269 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1270 ht->pDestructor(&p->val);
1271 }
1272 } while (++p != end);
1273 }
1274 } else if (ht->nNumUsed == ht->nNumOfElements) {
1275 do {
1276 ht->pDestructor(&p->val);
1277 if (EXPECTED(p->key)) {
1278 zend_string_release(p->key);
1279 }
1280 } while (++p != end);
1281 } else {
1282 do {
1283 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1284 ht->pDestructor(&p->val);
1285 if (EXPECTED(p->key)) {
1286 zend_string_release(p->key);
1287 }
1288 }
1289 } while (++p != end);
1290 }
1291
1292 SET_INCONSISTENT(HT_DESTROYED);
1293 } else {
1294 if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
1295 do {
1296 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1297 if (EXPECTED(p->key)) {
1298 zend_string_release(p->key);
1299 }
1300 }
1301 } while (++p != end);
1302 }
1303 }
1304 zend_hash_iterators_remove(ht);
1305 } else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1306 return;
1307 }
1308 pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1309 }
1310
1311 ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
1312 {
1313 Bucket *p, *end;
1314
1315 IS_CONSISTENT(ht);
1316 HT_ASSERT(GC_REFCOUNT(ht) <= 1);
1317
1318
1319 GC_REMOVE_FROM_BUFFER(ht);
1320 GC_TYPE_INFO(ht) = IS_NULL | (GC_WHITE << 16);
1321
1322 if (ht->nNumUsed) {
1323
1324 if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
1325 zend_hash_destroy(ht);
1326 goto free_ht;
1327 }
1328
1329 p = ht->arData;
1330 end = p + ht->nNumUsed;
1331 SET_INCONSISTENT(HT_IS_DESTROYING);
1332
1333 if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
1334 do {
1335 i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1336 } while (++p != end);
1337 } else if (ht->nNumUsed == ht->nNumOfElements) {
1338 do {
1339 i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1340 if (EXPECTED(p->key)) {
1341 zend_string_release(p->key);
1342 }
1343 } while (++p != end);
1344 } else {
1345 do {
1346 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1347 i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1348 if (EXPECTED(p->key)) {
1349 zend_string_release(p->key);
1350 }
1351 }
1352 } while (++p != end);
1353 }
1354 zend_hash_iterators_remove(ht);
1355 SET_INCONSISTENT(HT_DESTROYED);
1356 } else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
1357 goto free_ht;
1358 }
1359 efree(HT_GET_DATA_ADDR(ht));
1360 free_ht:
1361 FREE_HASHTABLE(ht);
1362 }
1363
1364 ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
1365 {
1366 Bucket *p, *end;
1367
1368 IS_CONSISTENT(ht);
1369 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1370
1371 if (ht->nNumUsed) {
1372 p = ht->arData;
1373 end = p + ht->nNumUsed;
1374 if (ht->pDestructor) {
1375 if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
1376 if (ht->nNumUsed == ht->nNumOfElements) {
1377 do {
1378 ht->pDestructor(&p->val);
1379 } while (++p != end);
1380 } else {
1381 do {
1382 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1383 ht->pDestructor(&p->val);
1384 }
1385 } while (++p != end);
1386 }
1387 } else if (ht->nNumUsed == ht->nNumOfElements) {
1388 do {
1389 ht->pDestructor(&p->val);
1390 if (EXPECTED(p->key)) {
1391 zend_string_release(p->key);
1392 }
1393 } while (++p != end);
1394 } else {
1395 do {
1396 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1397 ht->pDestructor(&p->val);
1398 if (EXPECTED(p->key)) {
1399 zend_string_release(p->key);
1400 }
1401 }
1402 } while (++p != end);
1403 }
1404 } else {
1405 if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
1406 if (ht->nNumUsed == ht->nNumOfElements) {
1407 do {
1408 if (EXPECTED(p->key)) {
1409 zend_string_release(p->key);
1410 }
1411 } while (++p != end);
1412 } else {
1413 do {
1414 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1415 if (EXPECTED(p->key)) {
1416 zend_string_release(p->key);
1417 }
1418 }
1419 } while (++p != end);
1420 }
1421 }
1422 }
1423 if (!(ht->u.flags & HASH_FLAG_PACKED)) {
1424 HT_HASH_RESET(ht);
1425 }
1426 }
1427 ht->nNumUsed = 0;
1428 ht->nNumOfElements = 0;
1429 ht->nNextFreeElement = 0;
1430 ht->nInternalPointer = HT_INVALID_IDX;
1431 }
1432
1433 ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
1434 {
1435 Bucket *p, *end;
1436
1437 IS_CONSISTENT(ht);
1438 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1439
1440 if (ht->nNumUsed) {
1441 p = ht->arData;
1442 end = p + ht->nNumUsed;
1443 if (ht->u.flags & HASH_FLAG_STATIC_KEYS) {
1444 do {
1445 i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1446 } while (++p != end);
1447 } else if (ht->nNumUsed == ht->nNumOfElements) {
1448 do {
1449 i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1450 zend_string_release(p->key);
1451 } while (++p != end);
1452 } else {
1453 do {
1454 if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
1455 i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
1456 zend_string_release(p->key);
1457 }
1458 } while (++p != end);
1459 }
1460 HT_HASH_RESET(ht);
1461 }
1462 ht->nNumUsed = 0;
1463 ht->nNumOfElements = 0;
1464 ht->nNextFreeElement = 0;
1465 ht->nInternalPointer = HT_INVALID_IDX;
1466 }
1467
1468 ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
1469 {
1470 uint32_t idx;
1471 Bucket *p;
1472
1473 IS_CONSISTENT(ht);
1474 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1475
1476 p = ht->arData;
1477 for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
1478 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1479 _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1480 }
1481 if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1482 pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1483 }
1484
1485 SET_INCONSISTENT(HT_DESTROYED);
1486 }
1487
1488 ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
1489 {
1490 uint32_t idx;
1491 Bucket *p;
1492
1493 IS_CONSISTENT(ht);
1494 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1495
1496 idx = ht->nNumUsed;
1497 p = ht->arData + ht->nNumUsed;
1498 while (idx > 0) {
1499 idx--;
1500 p--;
1501 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1502 _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1503 }
1504
1505 if (ht->u.flags & HASH_FLAG_INITIALIZED) {
1506 pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
1507 }
1508
1509 SET_INCONSISTENT(HT_DESTROYED);
1510 }
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521 ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
1522 {
1523 uint32_t idx;
1524 Bucket *p;
1525 int result;
1526
1527 IS_CONSISTENT(ht);
1528 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1529
1530 HASH_PROTECT_RECURSION(ht);
1531 for (idx = 0; idx < ht->nNumUsed; idx++) {
1532 p = ht->arData + idx;
1533 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1534 result = apply_func(&p->val);
1535
1536 if (result & ZEND_HASH_APPLY_REMOVE) {
1537 _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1538 }
1539 if (result & ZEND_HASH_APPLY_STOP) {
1540 break;
1541 }
1542 }
1543 HASH_UNPROTECT_RECURSION(ht);
1544 }
1545
1546
1547 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
1548 {
1549 uint32_t idx;
1550 Bucket *p;
1551 int result;
1552
1553 IS_CONSISTENT(ht);
1554 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1555
1556 HASH_PROTECT_RECURSION(ht);
1557 for (idx = 0; idx < ht->nNumUsed; idx++) {
1558 p = ht->arData + idx;
1559 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1560 result = apply_func(&p->val, argument);
1561
1562 if (result & ZEND_HASH_APPLY_REMOVE) {
1563 _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1564 }
1565 if (result & ZEND_HASH_APPLY_STOP) {
1566 break;
1567 }
1568 }
1569 HASH_UNPROTECT_RECURSION(ht);
1570 }
1571
1572
1573 ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
1574 {
1575 uint32_t idx;
1576 Bucket *p;
1577 va_list args;
1578 zend_hash_key hash_key;
1579 int result;
1580
1581 IS_CONSISTENT(ht);
1582 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1583
1584 HASH_PROTECT_RECURSION(ht);
1585
1586 for (idx = 0; idx < ht->nNumUsed; idx++) {
1587 p = ht->arData + idx;
1588 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1589 va_start(args, num_args);
1590 hash_key.h = p->h;
1591 hash_key.key = p->key;
1592
1593 result = apply_func(&p->val, num_args, args, &hash_key);
1594
1595 if (result & ZEND_HASH_APPLY_REMOVE) {
1596 _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1597 }
1598 if (result & ZEND_HASH_APPLY_STOP) {
1599 va_end(args);
1600 break;
1601 }
1602 va_end(args);
1603 }
1604
1605 HASH_UNPROTECT_RECURSION(ht);
1606 }
1607
1608
1609 ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
1610 {
1611 uint32_t idx;
1612 Bucket *p;
1613 int result;
1614
1615 IS_CONSISTENT(ht);
1616 HT_ASSERT(GC_REFCOUNT(ht) == 1);
1617
1618 HASH_PROTECT_RECURSION(ht);
1619 idx = ht->nNumUsed;
1620 while (idx > 0) {
1621 idx--;
1622 p = ht->arData + idx;
1623 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1624
1625 result = apply_func(&p->val);
1626
1627 if (result & ZEND_HASH_APPLY_REMOVE) {
1628 _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
1629 }
1630 if (result & ZEND_HASH_APPLY_STOP) {
1631 break;
1632 }
1633 }
1634 HASH_UNPROTECT_RECURSION(ht);
1635 }
1636
1637
1638 ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
1639 {
1640 uint32_t idx;
1641 Bucket *p;
1642 zval *new_entry, *data;
1643 zend_bool setTargetPointer;
1644
1645 IS_CONSISTENT(source);
1646 IS_CONSISTENT(target);
1647 HT_ASSERT(GC_REFCOUNT(target) == 1);
1648
1649 setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
1650 for (idx = 0; idx < source->nNumUsed; idx++) {
1651 p = source->arData + idx;
1652 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1653
1654 if (setTargetPointer && source->nInternalPointer == idx) {
1655 target->nInternalPointer = HT_INVALID_IDX;
1656 }
1657
1658 data = &p->val;
1659 if (Z_TYPE_P(data) == IS_INDIRECT) {
1660 data = Z_INDIRECT_P(data);
1661 if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
1662 continue;
1663 }
1664 }
1665 if (p->key) {
1666 new_entry = zend_hash_update(target, p->key, data);
1667 } else {
1668 new_entry = zend_hash_index_update(target, p->h, data);
1669 }
1670 if (pCopyConstructor) {
1671 pCopyConstructor(new_entry);
1672 }
1673 }
1674 if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
1675 idx = 0;
1676 while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1677 idx++;
1678 }
1679 target->nInternalPointer = idx;
1680 }
1681 }
1682
1683
1684 static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
1685 {
1686 zval *data = &p->val;
1687
1688 if (with_holes) {
1689 if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1690 data = Z_INDIRECT_P(data);
1691 }
1692 if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1693 return 0;
1694 }
1695 } else if (!packed) {
1696
1697 if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
1698 data = Z_INDIRECT_P(data);
1699 if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
1700 return 0;
1701 }
1702 }
1703 }
1704
1705 do {
1706 if (Z_OPT_REFCOUNTED_P(data)) {
1707 if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
1708 (Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
1709 Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
1710 data = Z_REFVAL_P(data);
1711 if (!Z_OPT_REFCOUNTED_P(data)) {
1712 break;
1713 }
1714 }
1715 Z_ADDREF_P(data);
1716 }
1717 } while (0);
1718 ZVAL_COPY_VALUE(&q->val, data);
1719
1720 q->h = p->h;
1721 if (packed) {
1722 q->key = NULL;
1723 } else {
1724 uint32_t nIndex;
1725
1726 q->key = p->key;
1727 if (!static_keys && q->key) {
1728 zend_string_addref(q->key);
1729 }
1730
1731 nIndex = q->h | target->nTableMask;
1732 Z_NEXT(q->val) = HT_HASH(target, nIndex);
1733 HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
1734 }
1735 return 1;
1736 }
1737
1738 static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
1739 {
1740 Bucket *p = source->arData;
1741 Bucket *q = target->arData;
1742 Bucket *end = p + source->nNumUsed;
1743
1744 do {
1745 if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
1746 if (with_holes) {
1747 ZVAL_UNDEF(&q->val);
1748 }
1749 }
1750 p++; q++;
1751 } while (p != end);
1752 }
1753
1754 static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
1755 {
1756 uint32_t idx = 0;
1757 Bucket *p = source->arData;
1758 Bucket *q = target->arData;
1759 Bucket *end = p + source->nNumUsed;
1760
1761 do {
1762 if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
1763 uint32_t target_idx = idx;
1764
1765 idx++; p++;
1766 while (p != end) {
1767 if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
1768 if (source->nInternalPointer == idx) {
1769 target->nInternalPointer = target_idx;
1770 }
1771 target_idx++; q++;
1772 }
1773 idx++; p++;
1774 }
1775 return target_idx;
1776 }
1777 idx++; p++; q++;
1778 } while (p != end);
1779 return idx;
1780 }
1781
1782 ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
1783 {
1784 uint32_t idx;
1785 HashTable *target;
1786
1787 IS_CONSISTENT(source);
1788
1789 ALLOC_HASHTABLE(target);
1790 GC_REFCOUNT(target) = 1;
1791 GC_TYPE_INFO(target) = IS_ARRAY;
1792
1793 target->nTableSize = source->nTableSize;
1794 target->pDestructor = source->pDestructor;
1795
1796 if (source->nNumUsed == 0) {
1797 target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
1798 target->nTableMask = HT_MIN_MASK;
1799 target->nNumUsed = 0;
1800 target->nNumOfElements = 0;
1801 target->nNextFreeElement = 0;
1802 target->nInternalPointer = HT_INVALID_IDX;
1803 HT_SET_DATA_ADDR(target, &uninitialized_bucket);
1804 } else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
1805 target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
1806 target->nTableMask = source->nTableMask;
1807 target->nNumUsed = source->nNumUsed;
1808 target->nNumOfElements = source->nNumOfElements;
1809 target->nNextFreeElement = source->nNextFreeElement;
1810 HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1811 target->nInternalPointer = source->nInternalPointer;
1812 memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
1813 if (target->nNumOfElements > 0 &&
1814 target->nInternalPointer == HT_INVALID_IDX) {
1815 idx = 0;
1816 while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1817 idx++;
1818 }
1819 target->nInternalPointer = idx;
1820 }
1821 } else if (source->u.flags & HASH_FLAG_PACKED) {
1822 target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
1823 target->nTableMask = source->nTableMask;
1824 target->nNumUsed = source->nNumUsed;
1825 target->nNumOfElements = source->nNumOfElements;
1826 target->nNextFreeElement = source->nNextFreeElement;
1827 HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1828 target->nInternalPointer = source->nInternalPointer;
1829 HT_HASH_RESET_PACKED(target);
1830
1831 if (target->nNumUsed == target->nNumOfElements) {
1832 zend_array_dup_packed_elements(source, target, 0);
1833 } else {
1834 zend_array_dup_packed_elements(source, target, 1);
1835 }
1836 if (target->nNumOfElements > 0 &&
1837 target->nInternalPointer == HT_INVALID_IDX) {
1838 idx = 0;
1839 while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1840 idx++;
1841 }
1842 target->nInternalPointer = idx;
1843 }
1844 } else {
1845 target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
1846 target->nTableMask = source->nTableMask;
1847 target->nNextFreeElement = source->nNextFreeElement;
1848 target->nInternalPointer = HT_INVALID_IDX;
1849 HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
1850 HT_HASH_RESET(target);
1851
1852 if (target->u.flags & HASH_FLAG_STATIC_KEYS) {
1853 if (source->nNumUsed == source->nNumOfElements) {
1854 idx = zend_array_dup_elements(source, target, 1, 0);
1855 } else {
1856 idx = zend_array_dup_elements(source, target, 1, 1);
1857 }
1858 } else {
1859 if (source->nNumUsed == source->nNumOfElements) {
1860 idx = zend_array_dup_elements(source, target, 0, 0);
1861 } else {
1862 idx = zend_array_dup_elements(source, target, 0, 1);
1863 }
1864 }
1865 target->nNumUsed = idx;
1866 target->nNumOfElements = idx;
1867 if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
1868 target->nInternalPointer = 0;
1869 }
1870 }
1871 return target;
1872 }
1873
1874
1875 ZEND_API void ZEND_FASTCALL _zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite ZEND_FILE_LINE_DC)
1876 {
1877 uint32_t idx;
1878 Bucket *p;
1879 zval *t;
1880
1881 IS_CONSISTENT(source);
1882 IS_CONSISTENT(target);
1883 HT_ASSERT(GC_REFCOUNT(target) == 1);
1884
1885 if (overwrite) {
1886 for (idx = 0; idx < source->nNumUsed; idx++) {
1887 p = source->arData + idx;
1888 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1889 if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1890 UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1891 continue;
1892 }
1893 if (p->key) {
1894 t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1895 if (t && pCopyConstructor) {
1896 pCopyConstructor(t);
1897 }
1898 } else {
1899 t = zend_hash_index_update(target, p->h, &p->val);
1900 if (t && pCopyConstructor) {
1901 pCopyConstructor(t);
1902 }
1903 }
1904 }
1905 } else {
1906 for (idx = 0; idx < source->nNumUsed; idx++) {
1907 p = source->arData + idx;
1908 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1909 if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
1910 UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
1911 continue;
1912 }
1913 if (p->key) {
1914 t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_ADD | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
1915 if (t && pCopyConstructor) {
1916 pCopyConstructor(t);
1917 }
1918 } else {
1919 t = zend_hash_index_add(target, p->h, &p->val);
1920 if (t && pCopyConstructor) {
1921 pCopyConstructor(t);
1922 }
1923 }
1924 }
1925 }
1926 if (target->nNumOfElements > 0) {
1927 idx = 0;
1928 while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1929 idx++;
1930 }
1931 target->nInternalPointer = idx;
1932 }
1933 }
1934
1935
1936 static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
1937 {
1938 zend_hash_key hash_key;
1939
1940 hash_key.h = p->h;
1941 hash_key.key = p->key;
1942 return merge_checker_func(target, source_data, &hash_key, pParam);
1943 }
1944
1945
1946 ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
1947 {
1948 uint32_t idx;
1949 Bucket *p;
1950 zval *t;
1951
1952 IS_CONSISTENT(source);
1953 IS_CONSISTENT(target);
1954 HT_ASSERT(GC_REFCOUNT(target) == 1);
1955
1956 for (idx = 0; idx < source->nNumUsed; idx++) {
1957 p = source->arData + idx;
1958 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
1959 if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
1960 t = zend_hash_update(target, p->key, &p->val);
1961 if (t && pCopyConstructor) {
1962 pCopyConstructor(t);
1963 }
1964 }
1965 }
1966 if (target->nNumOfElements > 0) {
1967 idx = 0;
1968 while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
1969 idx++;
1970 }
1971 target->nInternalPointer = idx;
1972 }
1973 }
1974
1975
1976
1977 ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
1978 {
1979 Bucket *p;
1980
1981 IS_CONSISTENT(ht);
1982
1983 p = zend_hash_find_bucket(ht, key);
1984 return p ? &p->val : NULL;
1985 }
1986
1987 ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
1988 {
1989 zend_ulong h;
1990 Bucket *p;
1991
1992 IS_CONSISTENT(ht);
1993
1994 h = zend_inline_hash_func(str, len);
1995 p = zend_hash_str_find_bucket(ht, str, len, h);
1996 return p ? &p->val : NULL;
1997 }
1998
1999 ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
2000 {
2001 Bucket *p;
2002
2003 IS_CONSISTENT(ht);
2004
2005 p = zend_hash_find_bucket(ht, key);
2006 return p ? 1 : 0;
2007 }
2008
2009 ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
2010 {
2011 zend_ulong h;
2012 Bucket *p;
2013
2014 IS_CONSISTENT(ht);
2015
2016 h = zend_inline_hash_func(str, len);
2017 p = zend_hash_str_find_bucket(ht, str, len, h);
2018 return p ? 1 : 0;
2019 }
2020
2021 ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
2022 {
2023 Bucket *p;
2024
2025 IS_CONSISTENT(ht);
2026
2027 if (ht->u.flags & HASH_FLAG_PACKED) {
2028 if (h < ht->nNumUsed) {
2029 p = ht->arData + h;
2030 if (Z_TYPE(p->val) != IS_UNDEF) {
2031 return &p->val;
2032 }
2033 }
2034 return NULL;
2035 }
2036
2037 p = zend_hash_index_find_bucket(ht, h);
2038 return p ? &p->val : NULL;
2039 }
2040
2041
2042 ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
2043 {
2044 Bucket *p;
2045
2046 IS_CONSISTENT(ht);
2047
2048 if (ht->u.flags & HASH_FLAG_PACKED) {
2049 if (h < ht->nNumUsed) {
2050 if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
2051 return 1;
2052 }
2053 }
2054 return 0;
2055 }
2056
2057 p = zend_hash_index_find_bucket(ht, h);
2058 return p ? 1 : 0;
2059 }
2060
2061
2062 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
2063 {
2064 uint32_t idx;
2065
2066 IS_CONSISTENT(ht);
2067 HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2068
2069 for (idx = 0; idx < ht->nNumUsed; idx++) {
2070 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2071 *pos = idx;
2072 return;
2073 }
2074 }
2075 *pos = HT_INVALID_IDX;
2076 }
2077
2078
2079
2080
2081
2082 ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
2083 {
2084 uint32_t idx;
2085
2086 IS_CONSISTENT(ht);
2087 HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2088
2089 idx = ht->nNumUsed;
2090 while (idx > 0) {
2091 idx--;
2092 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2093 *pos = idx;
2094 return;
2095 }
2096 }
2097 *pos = HT_INVALID_IDX;
2098 }
2099
2100
2101 ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
2102 {
2103 uint32_t idx = *pos;
2104
2105 IS_CONSISTENT(ht);
2106 HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2107
2108 if (idx != HT_INVALID_IDX) {
2109 while (1) {
2110 idx++;
2111 if (idx >= ht->nNumUsed) {
2112 *pos = HT_INVALID_IDX;
2113 return SUCCESS;
2114 }
2115 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2116 *pos = idx;
2117 return SUCCESS;
2118 }
2119 }
2120 } else {
2121 return FAILURE;
2122 }
2123 }
2124
2125 ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
2126 {
2127 uint32_t idx = *pos;
2128
2129 IS_CONSISTENT(ht);
2130 HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
2131
2132 if (idx != HT_INVALID_IDX) {
2133 while (idx > 0) {
2134 idx--;
2135 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
2136 *pos = idx;
2137 return SUCCESS;
2138 }
2139 }
2140 *pos = HT_INVALID_IDX;
2141 return SUCCESS;
2142 } else {
2143 return FAILURE;
2144 }
2145 }
2146
2147
2148
2149 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
2150 {
2151 uint32_t idx = *pos;
2152 Bucket *p;
2153
2154 IS_CONSISTENT(ht);
2155 if (idx != HT_INVALID_IDX) {
2156 p = ht->arData + idx;
2157 if (p->key) {
2158 *str_index = p->key;
2159 return HASH_KEY_IS_STRING;
2160 } else {
2161 *num_index = p->h;
2162 return HASH_KEY_IS_LONG;
2163 }
2164 }
2165 return HASH_KEY_NON_EXISTENT;
2166 }
2167
2168 ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
2169 {
2170 uint32_t idx = *pos;
2171 Bucket *p;
2172
2173 IS_CONSISTENT(ht);
2174 if (idx == HT_INVALID_IDX) {
2175 ZVAL_NULL(key);
2176 } else {
2177 p = ht->arData + idx;
2178 if (p->key) {
2179 ZVAL_STR_COPY(key, p->key);
2180 } else {
2181 ZVAL_LONG(key, p->h);
2182 }
2183 }
2184 }
2185
2186 ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
2187 {
2188 uint32_t idx = *pos;
2189 Bucket *p;
2190
2191 IS_CONSISTENT(ht);
2192 if (idx != HT_INVALID_IDX) {
2193 p = ht->arData + idx;
2194 if (p->key) {
2195 return HASH_KEY_IS_STRING;
2196 } else {
2197 return HASH_KEY_IS_LONG;
2198 }
2199 }
2200 return HASH_KEY_NON_EXISTENT;
2201 }
2202
2203
2204 ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
2205 {
2206 uint32_t idx = *pos;
2207 Bucket *p;
2208
2209 IS_CONSISTENT(ht);
2210 if (idx != HT_INVALID_IDX) {
2211 p = ht->arData + idx;
2212 return &p->val;
2213 } else {
2214 return NULL;
2215 }
2216 }
2217
2218 ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
2219 {
2220 zval val;
2221 zend_ulong h;
2222 zend_string *key;
2223
2224 ZVAL_COPY_VALUE(&val, &p->val);
2225 h = p->h;
2226 key = p->key;
2227
2228 ZVAL_COPY_VALUE(&p->val, &q->val);
2229 p->h = q->h;
2230 p->key = q->key;
2231
2232 ZVAL_COPY_VALUE(&q->val, &val);
2233 q->h = h;
2234 q->key = key;
2235 }
2236
2237 ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
2238 {
2239 zval val;
2240
2241 ZVAL_COPY_VALUE(&val, &p->val);
2242 ZVAL_COPY_VALUE(&p->val, &q->val);
2243 ZVAL_COPY_VALUE(&q->val, &val);
2244 }
2245
2246 ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
2247 {
2248 zval val;
2249 zend_ulong h;
2250
2251 ZVAL_COPY_VALUE(&val, &p->val);
2252 h = p->h;
2253
2254 ZVAL_COPY_VALUE(&p->val, &q->val);
2255 p->h = q->h;
2256
2257 ZVAL_COPY_VALUE(&q->val, &val);
2258 q->h = h;
2259 }
2260
2261 ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
2262 {
2263 Bucket *p;
2264 uint32_t i, j;
2265
2266 IS_CONSISTENT(ht);
2267 HT_ASSERT(GC_REFCOUNT(ht) == 1);
2268
2269 if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) {
2270 return SUCCESS;
2271 }
2272
2273 if (ht->nNumUsed == ht->nNumOfElements) {
2274 i = ht->nNumUsed;
2275 } else {
2276 for (j = 0, i = 0; j < ht->nNumUsed; j++) {
2277 p = ht->arData + j;
2278 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2279 if (i != j) {
2280 ht->arData[i] = *p;
2281 }
2282 i++;
2283 }
2284 }
2285
2286 sort((void *)ht->arData, i, sizeof(Bucket), compar,
2287 (swap_func_t)(renumber? zend_hash_bucket_renum_swap :
2288 ((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
2289
2290 HANDLE_BLOCK_INTERRUPTIONS();
2291 ht->nNumUsed = i;
2292 ht->nInternalPointer = 0;
2293
2294 if (renumber) {
2295 for (j = 0; j < i; j++) {
2296 p = ht->arData + j;
2297 p->h = j;
2298 if (p->key) {
2299 zend_string_release(p->key);
2300 p->key = NULL;
2301 }
2302 }
2303
2304 ht->nNextFreeElement = i;
2305 }
2306 if (ht->u.flags & HASH_FLAG_PACKED) {
2307 if (!renumber) {
2308 zend_hash_packed_to_hash(ht);
2309 }
2310 } else {
2311 if (renumber) {
2312 void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
2313 Bucket *old_buckets = ht->arData;
2314
2315 new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht->u.flags & HASH_FLAG_PERSISTENT));
2316 ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
2317 ht->nTableMask = HT_MIN_MASK;
2318 HT_SET_DATA_ADDR(ht, new_data);
2319 memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
2320 pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT & HASH_FLAG_PERSISTENT);
2321 HT_HASH_RESET_PACKED(ht);
2322 } else {
2323 zend_hash_rehash(ht);
2324 }
2325 }
2326
2327 HANDLE_UNBLOCK_INTERRUPTIONS();
2328
2329 return SUCCESS;
2330 }
2331
2332 static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
2333 uint32_t idx1, idx2;
2334
2335 if (ht1->nNumOfElements != ht2->nNumOfElements) {
2336 return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
2337 }
2338
2339 for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
2340 Bucket *p1 = ht1->arData + idx1, *p2;
2341 zval *pData1, *pData2;
2342 int result;
2343
2344 if (Z_TYPE(p1->val) == IS_UNDEF) continue;
2345 if (ordered) {
2346 while (1) {
2347 ZEND_ASSERT(idx2 != ht2->nNumUsed);
2348 p2 = ht2->arData + idx2;
2349 if (Z_TYPE(p2->val) != IS_UNDEF) break;
2350 idx2++;
2351 }
2352 if (p1->key == NULL && p2->key == NULL) {
2353 if (p1->h != p2->h) {
2354 return p1->h > p2->h ? 1 : -1;
2355 }
2356 } else if (p1->key != NULL && p2->key != NULL) {
2357 if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
2358 return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
2359 }
2360
2361 result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
2362 if (result != 0) {
2363 return result;
2364 }
2365 } else {
2366
2367 return p1->key != NULL ? 1 : -1;
2368 }
2369 pData2 = &p2->val;
2370 idx2++;
2371 } else {
2372 if (p1->key == NULL) {
2373 pData2 = zend_hash_index_find(ht2, p1->h);
2374 if (pData2 == NULL) {
2375 return 1;
2376 }
2377 } else {
2378 pData2 = zend_hash_find(ht2, p1->key);
2379 if (pData2 == NULL) {
2380 return 1;
2381 }
2382 }
2383 }
2384
2385 pData1 = &p1->val;
2386 if (Z_TYPE_P(pData1) == IS_INDIRECT) {
2387 pData1 = Z_INDIRECT_P(pData1);
2388 }
2389 if (Z_TYPE_P(pData2) == IS_INDIRECT) {
2390 pData2 = Z_INDIRECT_P(pData2);
2391 }
2392
2393 if (Z_TYPE_P(pData1) == IS_UNDEF) {
2394 if (Z_TYPE_P(pData2) != IS_UNDEF) {
2395 return -1;
2396 }
2397 } else if (Z_TYPE_P(pData2) == IS_UNDEF) {
2398 return 1;
2399 } else {
2400 result = compar(pData1, pData2);
2401 if (result != 0) {
2402 return result;
2403 }
2404 }
2405 }
2406
2407 return 0;
2408 }
2409
2410 ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
2411 {
2412 int result;
2413 IS_CONSISTENT(ht1);
2414 IS_CONSISTENT(ht2);
2415
2416 HASH_PROTECT_RECURSION(ht1);
2417 HASH_PROTECT_RECURSION(ht2);
2418 result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
2419 HASH_UNPROTECT_RECURSION(ht1);
2420 HASH_UNPROTECT_RECURSION(ht2);
2421
2422 return result;
2423 }
2424
2425
2426 ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
2427 {
2428 uint32_t idx;
2429 Bucket *p, *res;
2430
2431 IS_CONSISTENT(ht);
2432
2433 if (ht->nNumOfElements == 0 ) {
2434 return NULL;
2435 }
2436
2437 idx = 0;
2438 while (1) {
2439 if (idx == ht->nNumUsed) {
2440 return NULL;
2441 }
2442 if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
2443 idx++;
2444 }
2445 res = ht->arData + idx;
2446 for (; idx < ht->nNumUsed; idx++) {
2447 p = ht->arData + idx;
2448 if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
2449
2450 if (flag) {
2451 if (compar(res, p) < 0) {
2452 res = p;
2453 }
2454 } else {
2455 if (compar(res, p) > 0) {
2456 res = p;
2457 }
2458 }
2459 }
2460 return &res->val;
2461 }
2462
2463 ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
2464 {
2465 register const char *tmp = key;
2466
2467 const char *end = key + length;
2468 ZEND_ASSERT(*end == '\0');
2469
2470 if (*tmp == '-') {
2471 tmp++;
2472 }
2473
2474 if ((*tmp == '0' && length > 1)
2475 || (end - tmp > MAX_LENGTH_OF_LONG - 1)
2476 || (SIZEOF_ZEND_LONG == 4 &&
2477 end - tmp == MAX_LENGTH_OF_LONG - 1 &&
2478 *tmp > '2')) {
2479 return 0;
2480 }
2481 *idx = (*tmp - '0');
2482 while (1) {
2483 ++tmp;
2484 if (tmp == end) {
2485 if (*key == '-') {
2486 if (*idx-1 > ZEND_LONG_MAX) {
2487 return 0;
2488 }
2489 *idx = 0 - *idx;
2490 } else if (*idx > ZEND_LONG_MAX) {
2491 return 0;
2492 }
2493 return 1;
2494 }
2495 if (*tmp <= '9' && *tmp >= '0') {
2496 *idx = (*idx * 10) + (*tmp - '0');
2497 } else {
2498 return 0;
2499 }
2500 }
2501 }
2502
2503
2504
2505
2506
2507
2508
2509