2 * Copyright © 2009 Keith Packard <keithp@keithp.com>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
/* Core tasking state for the scheduler.
 * NOTE(review): this view of the file is elided (original line numbers
 * are non-contiguous); matching #endif/#else lines are not visible.
 */
21 #if HAS_SAMPLE_PROFILE
22 #include <ao_sample_profile.h>
/* Sentinel value of ao_cur_task_index meaning "no task selected". */
31 #define AO_NO_TASK_INDEX 0xff
/* Table of all registered tasks, bounded by AO_NUM_TASKS. */
33 struct ao_task * ao_tasks[AO_NUM_TASKS];
/* The task currently executing (NULL until the scheduler picks one). */
35 struct ao_task *ao_cur_task;
/* Index of the current task in ao_tasks[], or AO_NO_TASK_INDEX. */
38 static uint8_t ao_cur_task_index;
41 #ifdef ao_arch_task_globals
/* Stack overrun checking is off by default. */
45 #define AO_CHECK_STACK 0
/* Non-zero while ao_yield() runs; suppresses stack checking there. */
48 static uint8_t in_yield;
/* Panic if the current task's stack has grown below its stack base.
 * Uses the address of a local variable ('q', declared on an elided
 * line) as a proxy for the current stack pointer.  Skipped while
 * yielding or before any task is current.
 */
50 static inline void ao_check_stack(void) {
52 if (!in_yield && ao_cur_task && &q < &ao_cur_task->stack[0])
53 ao_panic(AO_PANIC_STACK);
/* When stack checking is compiled out, the check is a no-op. */
56 #define ao_check_stack()
/* Debug builds verify interrupts are blocked; otherwise a no-op. */
60 #define ao_task_irq_check() ao_arch_irq_check()
62 #define ao_task_irq_check()
/* Bucket count for hashing sleep wait-channel addresses. */
67 #define SLEEP_HASH_SIZE 17
/* Tasks that are ready to run. */
69 static struct ao_list run_queue;
/* Tasks with a pending alarm, kept sorted by expiry time. */
70 static struct ao_list alarm_queue;
/* Sleeping tasks, hashed by wait-channel address. */
71 static struct ao_list ao_sleep_queue[SLEEP_HASH_SIZE];
/* Move 'task' onto the tail of the run queue, removing it from
 * whichever queue it currently sits on.
 */
74 ao_task_to_run_queue(struct ao_task *task)
77 ao_list_del(&task->queue);
78 ao_list_append(&task->queue, &run_queue);
/* Hash a wait-channel address to its sleep-queue bucket. */
81 static struct ao_list *
82 ao_task_sleep_queue(void *wchan)
84 return &ao_sleep_queue[(uintptr_t) wchan % SLEEP_HASH_SIZE];
/* Move 'task' onto the sleep-queue bucket for 'wchan'. */
88 ao_task_to_sleep_queue(struct ao_task *task, void *wchan)
91 ao_list_del(&task->queue);
92 ao_list_append(&task->queue, ao_task_sleep_queue(wchan));
/* Debug-only invariant check: the alarm queue must be sorted by
 * expiry and ao_task_alarm_tick must equal the earliest entry's
 * alarm.  NOTE(review): panic calls and loop bodies are elided in
 * this view.
 */
97 ao_task_validate_alarm_queue(void)
99 struct ao_task *alarm, *prev = NULL;
102 if (ao_list_is_empty(&alarm_queue))
/* Each entry must expire no earlier than its predecessor
 * (wraparound-safe 16-bit tick comparison).
 */
104 ao_list_for_each_entry(alarm, &alarm_queue, struct ao_task, alarm_queue) {
106 if ((int16_t) (alarm->alarm - prev->alarm) < 0) {
112 for (i = 0; i < ao_num_tasks; i++) {
115 if (ao_list_is_empty(&alarm->alarm_queue))
118 if (!ao_list_is_empty(&alarm->alarm_queue))
122 if (ao_task_alarm_tick != ao_list_first_entry(&alarm_queue, struct ao_task, alarm_queue)->alarm)
/* Non-debug builds compile the check away. */
126 #define ao_task_validate_alarm_queue()
/* Tick at which the earliest queued alarm fires; 0 when none. */
129 AO_TICK_TYPE ao_task_alarm_tick;
/* Insert 'task' into the alarm queue, keeping it sorted by expiry.
 * The (int16_t) delta comparison assumes tick deltas fit in 16 bits
 * -- TODO confirm against AO_TICK_TYPE's width.
 */
132 ao_task_to_alarm_queue(struct ao_task *task)
134 struct ao_task *alarm;
/* Insert before the first entry expiring at or after 'task'. */
136 ao_list_for_each_entry(alarm, &alarm_queue, struct ao_task, alarm_queue) {
137 if ((int16_t) (alarm->alarm - task->alarm) >= 0) {
138 ao_list_insert(&task->alarm_queue, alarm->alarm_queue.prev);
139 ao_task_alarm_tick = ao_list_first_entry(&alarm_queue, struct ao_task, alarm_queue)->alarm;
140 ao_task_validate_alarm_queue();
/* No later entry found: append at the tail. */
144 ao_list_append(&task->alarm_queue, &alarm_queue);
145 ao_task_alarm_tick = ao_list_first_entry(&alarm_queue, struct ao_task, alarm_queue)->alarm;
146 ao_task_validate_alarm_queue();
/* Remove 'task' from the alarm queue and refresh ao_task_alarm_tick
 * (0 once the queue is empty, else the new earliest expiry).
 */
150 ao_task_from_alarm_queue(struct ao_task *task)
153 ao_list_del(&task->alarm_queue);
154 if (ao_list_is_empty(&alarm_queue))
155 ao_task_alarm_tick = 0;
157 ao_task_alarm_tick = ao_list_first_entry(&alarm_queue, struct ao_task, alarm_queue)->alarm;
158 ao_task_validate_alarm_queue();
/* Initialize a new task's queue links to empty lists. */
162 ao_task_init_queue(struct ao_task *task)
164 ao_list_init(&task->queue);
165 ao_list_init(&task->alarm_queue);
/* Unlink an exiting task from both the run/sleep and alarm queues. */
169 ao_task_exit_queue(struct ao_task *task)
172 ao_list_del(&task->queue);
173 ao_list_del(&task->alarm_queue);
/* Move every task whose alarm has expired as of 'tick' from the alarm
 * queue to the run queue.  The queue is sorted, so the scan can stop
 * at the first unexpired entry (wraparound-safe comparison; the
 * loop-exit statement is elided in this view).
 */
177 ao_task_check_alarm(AO_TICK_TYPE tick)
179 struct ao_task *alarm, *next;
182 ao_list_for_each_entry_safe(alarm, next, &alarm_queue, struct ao_task, alarm_queue) {
183 if ((int16_t) (tick - alarm->alarm) < 0)
186 ao_task_from_alarm_queue(alarm);
187 ao_task_to_run_queue(alarm);
/* One-time initialization of all scheduler queues (the function
 * header is elided in this view -- presumably the task-queue init
 * routine; confirm against the full source).
 */
195 ao_list_init(&run_queue);
196 ao_list_init(&alarm_queue);
197 ao_task_alarm_tick = 0;
198 for (i = 0; i < SLEEP_HASH_SIZE; i++)
199 ao_list_init(&ao_sleep_queue[i]);
/* Debug: verify 'task' is present on the queue it ought to occupy
 * (its wchan's sleep bucket, or presumably the run queue -- the
 * branch lines are elided).  Runs with interrupts blocked; the
 * result-returning lines are elided in this view.
 */
204 ao_task_validate_queue(struct ao_task *task)
209 struct ao_list *queue;
211 flags = ao_arch_irqsave();
213 queue = ao_task_sleep_queue(task->wchan);
219 ao_list_for_each_entry(m, queue, struct ao_task, queue) {
225 ao_arch_irqrestore(flags);
/* Debug: verify 'task' is ordered correctly within the alarm queue
 * -- nothing expiring later may precede it, nothing sooner may follow
 * it.  Runs with interrupts blocked; result lines are elided in this
 * view.  Tasks with no alarm (0) are skipped.
 */
230 ao_task_validate_alarm(struct ao_task *task)
236 flags = ao_arch_irqsave();
237 if (task->alarm == 0)
239 ao_list_for_each_entry(m, &alarm_queue, struct ao_task, alarm_queue) {
/* Entries before 'task' in the walk must not expire later... */
244 if ((int16_t) (m->alarm - task->alarm) > 0)
/* ...and entries after it must not expire sooner. */
247 if ((int16_t) (task->alarm - m->alarm) > 0)
252 ao_arch_irqrestore(flags);
/* Debug: walk every registered task and report scheduler-queue
 * inconsistencies via printf.  The branch conditions selecting each
 * message are elided in this view.
 */
258 ao_task_validate(void)
261 struct ao_task *task;
264 for (i = 0; i < ao_num_tasks; i++) {
266 ret = ao_task_validate_queue(task);
269 printf ("sleeping task not on sleep queue %s %08x\n",
270 task->name, task->wchan);
272 printf ("running task not on run queue %s\n",
275 ret = ao_task_validate_alarm(task);
278 printf ("alarm task not on alarm queue %s %d\n",
279 task->name, task->alarm);
281 printf ("alarm queue has sooner entries after %s %d\n",
282 task->name, task->alarm);
284 printf ("alarm queue has later entries before %s %d\n",
285 task->name, task->alarm);
291 #endif /* HAS_TASK_QUEUE */
/* Compute the initial stack top for 'task': the end of its stack
 * array minus space reserved for thread-local storage.
 * NOTE(review): the comment below says "8-byte aligned", but
 * '(_tls_size() + 7) & ~3' only rounds the TLS reservation to a
 * 4-byte multiple; preserving 8-byte alignment of an 8-aligned top
 * would require '& ~7'.  Confirm against the port's stack-alignment
 * requirement (e.g. AAPCS mandates 8-byte stack alignment on ARM).
 */
294 ao_stack_top(struct ao_task *task)
296 uint8_t *top = &task->stack8[AO_STACK_SIZE];
298 /* Subtract off the TLS space, but keep the resulting
299 * stack 8-byte aligned
302 return top - ((_tls_size() + 7) & ~3);
/* Register a new task: assign a unique non-zero task_id, build an
 * initial stack frame that will "return" into task_func, and place
 * the task on the run queue.  Panics when the task table is full.
 * (Name assignment and several brace/increment lines are elided in
 * this view.)
 */
309 ao_add_task(struct ao_task * task, void (*task_func)(void), const char *name)
313 if (ao_num_tasks == AO_NUM_TASKS)
314 ao_panic(AO_PANIC_NO_TASK);
/* Search for an id not already in use; the loop terminates when the
 * unsigned id wraps back to 0 (0 itself is never assigned).
 */
315 for (task_id = 1; task_id != 0; task_id++) {
316 for (t = 0; t < ao_num_tasks; t++)
317 if (ao_tasks[t]->task_id == task_id)
319 if (t == ao_num_tasks)
322 task->task_id = task_id;
326 * Construct a stack frame so that it will 'return'
327 * to the start of the task
329 uint32_t *sp = ao_stack_top(task);
333 ao_arch_init_stack(task, sp, task_func);
336 ao_task_init_queue(task);
337 ao_task_to_run_queue(task);
339 ao_tasks[ao_num_tasks] = task;
/* When set, the scheduler briefly re-enables interrupts while
 * scanning, trading throughput for lower interrupt latency.
 */
344 uint8_t ao_task_minimize_latency;
346 /* Task switching function. This must not use any stack variables */
348 ao_yield(void) ao_arch_naked_define
/* First entry: pick the most recently added task as "current". */
353 if (ao_cur_task == NULL)
354 ao_cur_task = ao_tasks[ao_num_tasks-1];
356 if (ao_cur_task_index == AO_NO_TASK_INDEX)
357 ao_cur_task_index = ao_num_tasks-1;
/* Account the outgoing task's run time and yield count for the
 * sampling profiler.
 */
361 #if HAS_SAMPLE_PROFILE
362 AO_TICK_TYPE tick = ao_sample_profile_timer_value();
363 AO_TICK_TYPE run = tick - ao_cur_task->start;
364 if (run > ao_cur_task->max_run)
365 ao_cur_task->max_run = run;
366 ++ao_cur_task->yields;
/* Save the outgoing task's registers and stack pointer. */
368 ao_arch_save_stack();
/* Give pending interrupts a window before the scheduler scan. */
373 if (ao_task_minimize_latency)
374 ao_arch_release_interrupts();
377 ao_arch_block_interrupts();
382 /* Find a task to run. If there isn't any runnable task,
383 * this loop will run forever, which is just fine
/* Run-queue scheduler path -- presumably guarded by an elided
 * HAS_TASK_QUEUE #if; confirm against the full source.
 */
386 /* If the current task is running, move it to the
387 * end of the queue to allow other tasks a chance
389 if (ao_cur_task->wchan == NULL)
390 ao_task_to_run_queue(ao_cur_task);
392 ao_arch_memory_barrier();
393 if (!ao_list_is_empty(&run_queue))
395 /* Wait for interrupts when there's nothing ready */
396 if (ao_task_minimize_latency) {
397 ao_arch_release_interrupts();
398 ao_arch_block_interrupts();
400 ao_arch_wait_interrupt();
402 ao_cur_task = ao_list_first_entry(&run_queue, struct ao_task, queue);
/* Table-scan scheduler path -- presumably the elided #else branch:
 * round-robin over ao_tasks[] starting after the last-run task.
 */
405 uint8_t ao_last_task_index = ao_cur_task_index;
408 if (ao_cur_task_index == ao_num_tasks)
409 ao_cur_task_index = 0;
411 ao_cur_task = ao_tasks[ao_cur_task_index];
413 /* Check for ready task */
414 if (ao_cur_task->wchan == NULL)
417 /* Check if the alarm is set for a time which has passed */
418 if (ao_cur_task->alarm &&
419 (int16_t) (ao_time() - ao_cur_task->alarm) >= 0)
422 /* Wait for interrupts when there's nothing ready */
423 if (ao_cur_task_index == ao_last_task_index && !ao_task_minimize_latency)
424 ao_arch_wait_interrupt();
/* Restart the profiler clock for the incoming task. */
428 #if HAS_SAMPLE_PROFILE
429 ao_cur_task->start = ao_sample_profile_timer_value();
/* Arm the MPU stack guard for the incoming task's stack. */
432 ao_mpu_stack_guard(ao_cur_task->stack);
/* Point TLS at the incoming task's reserved TLS region. */
438 _set_tls(ao_stack_top(ao_cur_task));
/* Restore registers; execution resumes inside the incoming task. */
440 ao_arch_restore_stack();
/* Block the current task on wait channel 'wchan' and switch away
 * (the ao_yield() call is on an elided line).  After resuming, a
 * still-set wchan means no ao_wakeup cleared it -- i.e. the sleep
 * timed out -- so the wakeup state is reset here; the return-value
 * lines are elided (presumably non-zero for timeout -- confirm).
 */
444 ao_sleep(void *wchan)
448 flags = ao_arch_irqsave();
450 ao_cur_task->wchan = wchan;
452 ao_task_to_sleep_queue(ao_cur_task, wchan);
453 ao_arch_irqrestore(flags);
456 if (ao_cur_task->wchan) {
457 ao_cur_task->wchan = NULL;
458 ao_cur_task->alarm = 0;
/* Wake every task sleeping on 'wchan'.  The first path scans the
 * wchan's sleep-queue hash bucket and moves matches to the run queue;
 * the second (presumably an elided #else for builds without the task
 * queue) clears wchan directly in the task table.
 */
465 ao_wakeup(void *wchan)
467 ao_validate_cur_stack();
469 struct ao_task *sleep, *next;
470 struct ao_list *sleep_queue;
/* Nothing to wake before any task exists. */
473 if (ao_num_tasks == 0)
475 sleep_queue = ao_task_sleep_queue(wchan);
476 flags = ao_arch_irqsave();
477 ao_list_for_each_entry_safe(sleep, next, sleep_queue, struct ao_task, queue) {
478 if (sleep->wchan == wchan) {
/* An elided line presumably clears sleep->wchan here so the woken
 * task sees a genuine wakeup rather than a timeout -- confirm.
 */
480 ao_task_to_run_queue(sleep);
483 ao_arch_irqrestore(flags);
487 for (i = 0; i < ao_num_tasks; i++)
488 if (ao_tasks[i]->wchan == wchan)
489 ao_tasks[i]->wchan = NULL;
/* Sleep on 'wchan' with a timeout of 'timeout' ticks; arms an alarm
 * before sleeping and disarms it after.  Returns the ao_sleep result
 * (presumably non-zero on timeout -- confirm).
 */
496 ao_sleep_for(void *wchan, AO_TICK_TYPE timeout)
502 flags = ao_arch_irqsave();
504 /* Make sure we sleep *at least* delay ticks, which means adding
505 * one to account for the fact that we may be close to the next tick
/* alarm == 0 means "no alarm", so substitute 1 if the sum wraps. */
507 if (!(ao_cur_task->alarm = ao_time() + timeout + 1))
508 ao_cur_task->alarm = 1;
510 ao_task_to_alarm_queue(ao_cur_task);
511 ao_arch_irqrestore(flags);
514 ret = ao_sleep(wchan);
/* Disarm the alarm whether or not it fired. */
519 flags = ao_arch_irqsave();
521 ao_cur_task->alarm = 0;
523 ao_task_from_alarm_queue(ao_cur_task);
524 ao_arch_irqrestore(flags);
/* Dummy wait channel that nothing ever signals; pure-delay sleeps
 * park here and only the alarm can wake them.
 */
530 static uint8_t ao_forever;
/* Sleep for at least 'ticks' ticks. */
533 ao_delay(AO_TICK_TYPE ticks)
537 ao_sleep_for(&ao_forever, ticks);
/* Terminate the current task (the function header -- presumably
 * ao_exit -- is elided in this view): unlink it from all queues,
 * compact the task table over its slot, and switch away for good.
 */
544 ao_arch_block_interrupts();
/* Locate the current task's slot in the table. */
547 for (i = 0; i < ao_num_tasks; i++)
548 if (ao_tasks[i] == ao_cur_task)
550 ao_task_exit_queue(ao_cur_task);
552 i = ao_cur_task_index;
553 ao_cur_task_index = AO_NO_TASK_INDEX;
/* Shift later entries down over the vacated slot.
 * NOTE(review): reading ao_tasks[i+1] is only in-bounds if
 * ao_num_tasks was decremented on an elided line before this loop --
 * confirm against the full source.
 */
555 for (; i < ao_num_tasks; i++)
556 ao_tasks[i] = ao_tasks[i+1];
559 /* we'll never get back here */
/* Debug dump (the function header -- presumably ao_task_info -- is
 * elided in this view): print each task's wait channel, relative
 * alarm delta, and name.
 */
567 struct ao_task *task;
568 AO_TICK_TYPE now = ao_time();
570 for (i = 0; i < ao_num_tasks; i++) {
572 printf("%2d: wchan %08x alarm %5d %s\n",
/* 9999 stands in for "no alarm set". */
575 task->alarm ? (int16_t) (task->alarm - now) : 9999,
/* Debug builds presumably follow with a queue-validation call. */
578 #if HAS_TASK_QUEUE && DEBUG
/* Start multitasking: mark no task as current, then enter the
 * scheduler via the architecture hook (or, per elided lines,
 * presumably ao_yield).  Never returns.
 */
585 ao_start_scheduler(void)
588 ao_cur_task_index = AO_NO_TASK_INDEX;
591 #if HAS_ARCH_START_SCHEDULER
592 ao_arch_start_scheduler();
/* Tell the compiler control cannot flow out of this function. */
595 __builtin_unreachable();