/*-
 * Copyright (C) 2012 Oleg Moskalenko <mom040267@gmail.com>
 * Copyright (C) 2012 Gabor Kovesdan <gabor@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <err.h>
#include <langinfo.h>
#include <math.h>
#if defined(SORT_THREADS)
#include <pthread.h>
#ifndef __APPLE__
#include <semaphore.h>
#else
#include <mach/mach_init.h>
#include <mach/mach_error.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#endif
#endif
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <wctype.h>
#include <unistd.h>

#include "coll.h"
#include "radixsort.h"

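/*
 * This is an MSD (most-significant-byte-first) radix sort over the first
 * key of each record.  Each sort_level node fans out into up to 256
 * sublevels, one per possible byte value at the current depth; items whose
 * key is exhausted at this depth become "leaves" of the node.  Tiny nodes
 * and nodes more than 15 bytes deep fall back to a comparison sort
 * (mergesort when a stable sort was requested, DEFAULT_SORT_FUNC_RADIXSORT
 * otherwise).  Nodes ready to be sorted are kept on a stack so that, in
 * SORT_THREADS builds, several worker threads can drain it in parallel.
 */
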
#define DEFAULT_SORT_FUNC_RADIXSORT mergesort

#define TINY_NODE(sl) ((sl)->tosort_num < 65)
#define SMALL_NODE(sl) ((sl)->tosort_num < 5)

/* are we sorting in reverse order? */
static bool reverse_sort;

/* sort sub-levels array size */
static const size_t slsz = 256 * sizeof(struct sort_level*);

/* one sort level structure */
struct sort_level
{
	struct sort_level **sublevels;	/* 256 child buckets, one per byte value */
	struct sort_list_item **leaves;	/* items whose key ends at this level */
	struct sort_list_item **sorted;	/* shared output array */
	struct sort_list_item **tosort;	/* items not yet distributed */
	size_t leaves_num;
	size_t leaves_sz;
	size_t level;			/* byte depth of this node */
	size_t real_sln;		/* sublevels actually in use */
	size_t start_position;		/* this node's window into sorted[] */
	size_t sln;			/* allocated size of sublevels (0 or 256) */
	size_t tosort_num;
	size_t tosort_sz;
};

/* stack of sort levels ready to be sorted */
struct level_stack {
	struct level_stack *next;
	struct sort_level *sl;
};

static struct level_stack *g_ls;

#if defined(SORT_THREADS)
/* stack guarding mutex and condition variable */
static pthread_cond_t g_ls_cond;
static pthread_mutex_t g_ls_mutex;

/* counter: how many items are left (updated under g_ls_mutex) */
static size_t sort_left;

/* semaphore to count threads */
#ifndef __APPLE__
static sem_t mtsem;
#else
semaphore_t rmtsem;
#endif

/*
 * Decrement the count of items left to place; when it reaches zero, wake
 * any workers blocked in pop_ls_mt() so they can exit.
 */
static inline void
sort_left_dec(size_t n)
{
	pthread_mutex_lock(&g_ls_mutex);
	sort_left -= n;
	if (sort_left == 0 && nthreads > 1)
		pthread_cond_broadcast(&g_ls_cond);
	pthread_mutex_unlock(&g_ls_mutex);
}

/*
 * Do we have something to sort?
 *
 * This routine does not need locking of its own: its only caller,
 * pop_ls_mt(), already holds g_ls_mutex.
 */
static inline bool
have_sort_left(void)
{
	bool ret;

	ret = (sort_left > 0);

	return (ret);
}

#else

/* in single-threaded builds the counter is not needed */
#define sort_left_dec(n)

#endif /* SORT_THREADS */

/*
 * Push sort level to the stack
 */
static inline void
push_ls(struct sort_level *sl)
{
	struct level_stack *new_ls;

	new_ls = sort_malloc(sizeof(struct level_stack));
	new_ls->sl = sl;

#if defined(SORT_THREADS)
	if (nthreads > 1)
		pthread_mutex_lock(&g_ls_mutex);
#endif

	new_ls->next = g_ls;
	g_ls = new_ls;

#if defined(SORT_THREADS)
	if (nthreads > 1) {
		pthread_cond_signal(&g_ls_cond);
		pthread_mutex_unlock(&g_ls_mutex);
	}
#endif
}

/*
 * Pop sort level from the stack (single-threaded style)
 */
static inline struct sort_level*
pop_ls_st(void)
{
	struct sort_level *sl;

	if (g_ls) {
		struct level_stack *saved_ls;

		sl = g_ls->sl;
		saved_ls = g_ls;
		g_ls = g_ls->next;
		sort_free(saved_ls);
	} else
		sl = NULL;

	return (sl);
}

#if defined(SORT_THREADS)

/*
 * Pop sort level from the stack (multi-threaded style)
 */
static inline struct sort_level*
pop_ls_mt(void)
{
	struct level_stack *saved_ls;
	struct sort_level *sl;

	pthread_mutex_lock(&g_ls_mutex);

	for (;;) {
		if (g_ls) {
			sl = g_ls->sl;
			saved_ls = g_ls;
			g_ls = g_ls->next;
			break;
		}
		sl = NULL;
		saved_ls = NULL;

		if (!have_sort_left())
			break;
		pthread_cond_wait(&g_ls_cond, &g_ls_mutex);
	}

	pthread_mutex_unlock(&g_ls_mutex);

	sort_free(saved_ls);

	return (sl);
}
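
/*
 * The wait loop above blocks on g_ls_cond until either work appears on
 * the stack or all items have been placed (sort_left_dec() broadcasts
 * once sort_left reaches zero).  A NULL return therefore means the whole
 * sort is finished, not merely that the stack is momentarily empty.
 */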

#endif /* defined(SORT_THREADS) */

static void
add_to_sublevel(struct sort_level *sl, struct sort_list_item *item, size_t indx)
{
	struct sort_level *ssl;

	ssl = sl->sublevels[indx];

	if (ssl == NULL) {
		/* lazily create the child bucket on first use */
		ssl = sort_malloc(sizeof(struct sort_level));
		memset(ssl, 0, sizeof(struct sort_level));

		ssl->level = sl->level + 1;
		sl->sublevels[indx] = ssl;

		++(sl->real_sln);
	}

	if (++(ssl->tosort_num) > ssl->tosort_sz) {
		/* grow the child's tosort array in 128-item steps */
		ssl->tosort_sz = ssl->tosort_num + 128;
		ssl->tosort = sort_realloc(ssl->tosort,
		    sizeof(struct sort_list_item*) * (ssl->tosort_sz));
	}

	ssl->tosort[ssl->tosort_num - 1] = item;
}

static inline void
add_leaf(struct sort_level *sl, struct sort_list_item *item)
{

	if (++(sl->leaves_num) > sl->leaves_sz) {
		sl->leaves_sz = sl->leaves_num + 128;
		sl->leaves = sort_realloc(sl->leaves,
		    (sizeof(struct sort_list_item*) * (sl->leaves_sz)));
	}
	sl->leaves[sl->leaves_num - 1] = item;
}

static inline int
get_wc_index(struct sort_list_item *sli, size_t level)
{
	const struct key_value *kv;
	const struct bwstring *bws;

	kv = get_key_from_keys_array(&sli->ka, 0);
	bws = kv->k;

	if (BWSLEN(bws) > level)
		return (unsigned char) BWS_GET(bws, level);
	return (-1);
}
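
/*
 * Example: for a key whose bytes are "abc", get_wc_index() yields 'a' at
 * level 0, 'b' at level 1, 'c' at level 2 and -1 from level 3 onwards,
 * at which point place_item() files the record as a leaf instead of
 * descending into a sublevel.
 */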

static void
place_item(struct sort_level *sl, size_t item)
{
	struct sort_list_item *sli;
	int c;

	sli = sl->tosort[item];
	c = get_wc_index(sli, sl->level);

	if (c == -1)
		add_leaf(sl, sli);
	else
		add_to_sublevel(sl, sli, c);
}

static void
free_sort_level(struct sort_level *sl)
{

	if (sl) {
		if (sl->leaves)
			sort_free(sl->leaves);

		if (sl->level > 0)
			sort_free(sl->tosort);

		if (sl->sublevels) {
			struct sort_level *slc;
			size_t sln;

			sln = sl->sln;

			for (size_t i = 0; i < sln; ++i) {
				slc = sl->sublevels[i];
				if (slc)
					free_sort_level(slc);
			}

			sort_free(sl->sublevels);
		}

		sort_free(sl);
	}
}
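
/*
 * Children that were handed off (pushed onto the stack or sorted inline)
 * are set to NULL in sl->sublevels before the parent is freed, so
 * free_sort_level() only descends into subtrees that were never
 * scheduled.
 */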

static void
run_sort_level_next(struct sort_level *sl)
{
	struct sort_level *slc;
	size_t i, sln, tosort_num;

	if (sl->sublevels) {
		sort_free(sl->sublevels);
		sl->sublevels = NULL;
	}

	switch (sl->tosort_num) {
	case 0:
		goto end;
	case 1:
		sl->sorted[sl->start_position] = sl->tosort[0];
		sort_left_dec(1);
		goto end;
	case 2:
		/* two items: a single comparison settles the order */
		if (list_coll_offset(&(sl->tosort[0]), &(sl->tosort[1]),
		    sl->level) > 0) {
			sl->sorted[sl->start_position++] = sl->tosort[1];
			sl->sorted[sl->start_position] = sl->tosort[0];
		} else {
			sl->sorted[sl->start_position++] = sl->tosort[0];
			sl->sorted[sl->start_position] = sl->tosort[1];
		}
		sort_left_dec(2);

		goto end;
	default:
		if (TINY_NODE(sl) || (sl->level > 15)) {
			/*
			 * Tiny or very deep node: a comparison sort of the
			 * remaining items is cheaper than further fan-out.
			 */
			listcoll_t func;

			func = get_list_call_func(sl->level);

			sl->leaves = sl->tosort;
			sl->leaves_num = sl->tosort_num;
			sl->leaves_sz = sl->leaves_num;
			sl->leaves = sort_realloc(sl->leaves,
			    (sizeof(struct sort_list_item *) *
			    (sl->leaves_sz)));
			sl->tosort = NULL;
			sl->tosort_num = 0;
			sl->tosort_sz = 0;
			sl->sln = 0;
			sl->real_sln = 0;
			if (sort_opts_vals.sflag) {
				if (mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) func) == -1)
					/* NOTREACHED */
					err(2, "Radix sort error 3");
			} else
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) func);

			memcpy(sl->sorted + sl->start_position,
			    sl->leaves, sl->leaves_num *
			    sizeof(struct sort_list_item*));

			sort_left_dec(sl->leaves_num);

			goto end;
		} else {
			sl->tosort_sz = sl->tosort_num;
			sl->tosort = sort_realloc(sl->tosort,
			    sizeof(struct sort_list_item*) * (sl->tosort_sz));
		}
	}

	/* distribute the items into 256 buckets by their next key byte */
	sl->sln = 256;
	sl->sublevels = sort_malloc(slsz);
	memset(sl->sublevels, 0, slsz);

	sl->real_sln = 0;

	tosort_num = sl->tosort_num;
	for (i = 0; i < tosort_num; ++i)
		place_item(sl, i);

	sort_free(sl->tosort);
	sl->tosort = NULL;
	sl->tosort_num = 0;
	sl->tosort_sz = 0;

	if (sl->leaves_num > 1) {
		if (keys_num > 1) {
			if (sort_opts_vals.sflag) {
				mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			} else {
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			}
		} else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
			DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
			    sizeof(struct sort_list_item *),
			    (int(*)(const void *, const void *)) list_coll_by_str_only);
		}
	}

	sl->leaves_sz = sl->leaves_num;
	sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) *
	    (sl->leaves_sz)));

	if (!reverse_sort) {
		/* forward sort: leaves (shorter keys) come first */
		memcpy(sl->sorted + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sl->start_position += sl->leaves_num;
		sort_left_dec(sl->leaves_num);

		sort_free(sl->leaves);
		sl->leaves = NULL;
		sl->leaves_num = 0;
		sl->leaves_sz = 0;

		sln = sl->sln;

		for (i = 0; i < sln; ++i) {
			slc = sl->sublevels[i];

			if (slc) {
				slc->sorted = sl->sorted;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				if (SMALL_NODE(slc))
					run_sort_level_next(slc);
				else
					push_ls(slc);
				sl->sublevels[i] = NULL;
			}
		}

	} else {
		/* reverse sort: buckets from 255 down to 0, then the leaves */
		size_t n;

		sln = sl->sln;

		for (i = 0; i < sln; ++i) {
			n = sln - i - 1;
			slc = sl->sublevels[n];

			if (slc) {
				slc->sorted = sl->sorted;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				if (SMALL_NODE(slc))
					run_sort_level_next(slc);
				else
					push_ls(slc);
				sl->sublevels[n] = NULL;
			}
		}

		memcpy(sl->sorted + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sort_left_dec(sl->leaves_num);
	}

end:
	free_sort_level(sl);
}
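
/*
 * Ordering invariant: in a forward sort the leaves (records whose key is
 * exhausted, i.e. shorter keys) are emitted first, followed by sublevels
 * 0 through 255; in a reverse sort the sublevels are scheduled from 255
 * down to 0 and the leaves are emitted last.  Each child gets a disjoint
 * window of the shared sorted[] array via start_position, so children can
 * be processed concurrently without further locking.
 */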

/*
 * Single-threaded sort cycle
 */
static void
run_sort_cycle_st(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_st();
		if (slc == NULL)
			break;
		run_sort_level_next(slc);
	}
}

#if defined(SORT_THREADS)

/*
 * Multi-threaded sort cycle
 */
static void
run_sort_cycle_mt(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_mt();
		if (slc == NULL)
			break;
		run_sort_level_next(slc);
	}
}

/*
 * Sort cycle thread (in multi-threaded mode)
 */
static void*
sort_thread(void* arg)
{

	run_sort_cycle_mt();

#ifndef __APPLE__
	sem_post(&mtsem);
#else
	semaphore_signal(rmtsem);
#endif

	return (arg);
}

#endif /* defined(SORT_THREADS) */
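
/*
 * Each worker drains the stack via run_sort_cycle_mt() until pop_ls_mt()
 * reports that the sort is complete, then posts the semaphore exactly
 * once.  run_top_sort_level() waits for nthreads posts, so all workers
 * are known to be done before cleanup begins.
 */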

static void
run_top_sort_level(struct sort_level *sl)
{
	struct sort_level *slc;

	reverse_sort = sort_opts_vals.kflag ? keys[0].sm.rflag :
	    default_sort_mods->rflag;

	sl->start_position = 0;
	sl->sln = 256;
	sl->sublevels = sort_malloc(slsz);
	memset(sl->sublevels, 0, slsz);

	for (size_t i = 0; i < sl->tosort_num; ++i)
		place_item(sl, i);

	if (sl->leaves_num > 1) {
		if (keys_num > 1) {
			if (sort_opts_vals.sflag) {
				mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			} else {
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			}
		} else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
			DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
			    sizeof(struct sort_list_item *),
			    (int(*)(const void *, const void *)) list_coll_by_str_only);
		}
	}

	if (!reverse_sort) {
		memcpy(sl->tosort + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sl->start_position += sl->leaves_num;
		sort_left_dec(sl->leaves_num);

		for (size_t i = 0; i < sl->sln; ++i) {
			slc = sl->sublevels[i];

			if (slc) {
				slc->sorted = sl->tosort;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				push_ls(slc);
				sl->sublevels[i] = NULL;
			}
		}

	} else {
		size_t n;

		for (size_t i = 0; i < sl->sln; ++i) {
			n = sl->sln - i - 1;
			slc = sl->sublevels[n];

			if (slc) {
				slc->sorted = sl->tosort;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				push_ls(slc);
				sl->sublevels[n] = NULL;
			}
		}

		memcpy(sl->tosort + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));

		sort_left_dec(sl->leaves_num);
	}

#if defined(SORT_THREADS)
	if (nthreads < 2) {
#endif
		run_sort_cycle_st();
#if defined(SORT_THREADS)
	} else {
		size_t i;

		for (i = 0; i < nthreads; ++i) {
			pthread_attr_t attr;
			pthread_t pth;

			pthread_attr_init(&attr);
#ifndef __APPLE__
			pthread_attr_setdetachstate(&attr, PTHREAD_DETACHED);
#else
			pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
#endif

			for (;;) {
				int res = pthread_create(&pth, &attr,
				    sort_thread, NULL);

				if (res == 0)
					break;
				/*
				 * pthread_create() returns an error number
				 * rather than setting errno, so test the
				 * return value directly.
				 */
				if (res == EAGAIN) {
#ifndef __APPLE__
					pthread_yield();
#else
					sched_yield();
#endif
					continue;
				}
				errc(2, res, NULL);
			}

			pthread_attr_destroy(&attr);
		}

		for (i = 0; i < nthreads; ++i)
#ifndef __APPLE__
			sem_wait(&mtsem);
#else
			semaphore_wait(rmtsem);
#endif
	}
#endif /* defined(SORT_THREADS) */
}
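
/*
 * The top level differs from run_sort_level_next() in two ways: it reuses
 * the caller's tosort array as the output buffer instead of allocating a
 * separate one, and it always pushes children onto the stack rather than
 * recursing inline, since the stack is what the worker threads consume.
 */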

static void
run_sort(struct sort_list_item **base, size_t nmemb)
{
	struct sort_level *sl;

#if defined(SORT_THREADS)
	size_t nthreads_save = nthreads;
	if (nmemb < MT_SORT_THRESHOLD)
		nthreads = 1;

	if (nthreads > 1) {
		pthread_mutexattr_t mattr;

		pthread_mutexattr_init(&mattr);
#ifndef __APPLE__
		pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ADAPTIVE_NP);
#endif

		pthread_mutex_init(&g_ls_mutex, &mattr);
		pthread_cond_init(&g_ls_cond, NULL);

		pthread_mutexattr_destroy(&mattr);

#ifndef __APPLE__
		sem_init(&mtsem, 0, 0);
#else
		{
			mach_port_t self = mach_task_self();
			kern_return_t ret = semaphore_create(self, &rmtsem,
			    SYNC_POLICY_FIFO, 0);
			if (ret != KERN_SUCCESS)
				err(2, NULL);
		}
#endif
	}
#endif

	sl = sort_malloc(sizeof(struct sort_level));
	memset(sl, 0, sizeof(struct sort_level));

	sl->tosort = base;
	sl->tosort_num = nmemb;
	sl->tosort_sz = nmemb;

#if defined(SORT_THREADS)
	sort_left = nmemb;
#endif

	run_top_sort_level(sl);

	free_sort_level(sl);

#if defined(SORT_THREADS)
	if (nthreads > 1) {
#ifndef __APPLE__
		sem_destroy(&mtsem);
#else
		{
			mach_port_t self = mach_task_self();
			semaphore_destroy(self, rmtsem);
		}
#endif
		pthread_mutex_destroy(&g_ls_mutex);
		pthread_cond_destroy(&g_ls_cond);
	}
	nthreads = nthreads_save;
#endif
}

void
rxsort(struct sort_list_item **base, size_t nmemb)
{

	run_sort(base, nmemb);
}
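
/*
 * Usage sketch (hypothetical caller; the real entry point is sort(1)'s
 * main sorting path, which passes the parsed input lines):
 *
 *	struct sort_list_item **items = ...;	filled in elsewhere
 *	size_t nitems = ...;
 *
 *	rxsort(items, nitems);		items are reordered in place
 */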