7 changes: 3 additions & 4 deletions src/arch/aarch64/kernel/interrupts.rs
@@ -17,6 +17,7 @@ use memory_addresses::arch::aarch64::PhysAddr;
use crate::arch::aarch64::kernel::core_local::increment_irq_counter;
use crate::arch::aarch64::kernel::scheduler::State;
use crate::arch::aarch64::mm::paging::{self, BasePageSize, PageSize, PageTableEntryFlags};
use crate::arch::timer_interrupts;
#[cfg(not(feature = "pci"))]
use crate::drivers::mmio::get_interrupt_handlers;
#[cfg(feature = "pci")]
@@ -93,10 +94,8 @@ pub(crate) fn install_handlers() {

fn timer_handler() {
debug!("Handle timer interrupt");

// disable timer
CNTP_CVAL_EL0.set(0);
CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::CLEAR);
timer_interrupts::clear_active();
timer_interrupts::set_next_timer();
}

for (key, value) in get_interrupt_handlers().into_iter() {
2 changes: 2 additions & 0 deletions src/arch/mod.rs
@@ -1,5 +1,7 @@
//! Architecture-specific architecture abstraction.

pub(crate) mod timer_interrupts;

cfg_if::cfg_if! {
if #[cfg(target_arch = "aarch64")] {
pub(crate) mod aarch64;
8 changes: 4 additions & 4 deletions src/arch/riscv64/kernel/scheduler.rs
@@ -5,11 +5,10 @@ use free_list::{PageLayout, PageRange};
use memory_addresses::{PhysAddr, VirtAddr};

use crate::arch::riscv64::kernel::core_local::core_scheduler;
use crate::arch::riscv64::kernel::processor::set_oneshot_timer;
use crate::arch::riscv64::mm::paging::{BasePageSize, PageSize, PageTableEntryFlags};
use crate::mm::{FrameAlloc, PageAlloc, PageRangeAllocator};
use crate::scheduler::task::{Task, TaskFrame};
use crate::{DEFAULT_STACK_SIZE, KERNEL_STACK_SIZE};
use crate::{DEFAULT_STACK_SIZE, KERNEL_STACK_SIZE, timer_interrupts};

#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
@@ -360,9 +359,10 @@ unsafe extern "C" fn task_start(func: extern "C" fn(usize), arg: usize, user_sta
}

pub fn timer_handler() {
//increment_irq_counter(apic::TIMER_INTERRUPT_NUMBER.into());
debug!("Handle timer interrupt");
timer_interrupts::clear_active();
timer_interrupts::set_next_timer();
core_scheduler().handle_waiting_tasks();
set_oneshot_timer(None);
core_scheduler().scheduler();
}

118 changes: 118 additions & 0 deletions src/arch/timer_interrupts.rs
@@ -0,0 +1,118 @@
use core::sync::atomic::{AtomicU64, Ordering};

#[cfg(feature = "net")]
use crate::executor::network::NETWORK_WAKER;
use crate::set_oneshot_timer;

/// A possible timer interrupt source (i.e. reason the timer interrupt was set
/// up).
#[derive(PartialEq, Eq)]
pub enum Source {
Network,
Scheduler,
}

/// A slot in the timer list. Each source is represented once. This is so that
/// we can have multiple timers at the same time with only one hardware timer.
struct Slot {
/// Timer source.
source: Source,
/// Point in time at which to wake up (in microsecond precision).
/// A value of [`u64::MAX`] means the timer is not set.
wakeup_time: AtomicU64,
}

/// The actual timer list with one entry for each source.
static TIMERS: [Slot; 2] = [
Slot {
source: Source::Network,
wakeup_time: AtomicU64::new(u64::MAX),
},
Slot {
source: Source::Scheduler,
wakeup_time: AtomicU64::new(u64::MAX),
},
];

/// Create a new timer, overriding any previous timer for the source.
#[cfg(feature = "net")]
pub fn create_timer(source: Source, wakeup_micros: u64) {
trace!("Setting relative timer interrupt for {wakeup_micros}us");

create_timer_abs(
source,
crate::arch::processor::get_timer_ticks() + wakeup_micros,
);
}

/// Create a new timer, but with an absolute wakeup time.
pub fn create_timer_abs(source: Source, wakeup_time: u64) {
trace!(
"Setting an absolute timer interrupt for {}us",
wakeup_time - crate::arch::processor::get_timer_ticks()
);

{
// SAFETY: Our timer list has an entry for every possible source
let previous_entry = TIMERS.iter().find(|slot| slot.source == source).unwrap();

// Overwrite the wakeup time
previous_entry
.wakeup_time
.store(wakeup_time, Ordering::Relaxed);
}

// If this timer is the one closest in the future, set the real timer to it
// SAFETY: There's more than 1 slot
if TIMERS
.iter()
.map(|slot| slot.wakeup_time.load(Ordering::Relaxed))
.min_by(|a, b| a.cmp(b))
.unwrap()
== wakeup_time
{
trace!("Setting the oneshot timer now");

set_oneshot_timer(Some(wakeup_time));
}
}

/// Sets the next timer or disables it if no timer is pending.
pub fn set_next_timer() {
// SAFETY: There's more than 1 slot
let lowest_timer = TIMERS
.iter()
.map(|slot| slot.wakeup_time.load(Ordering::Relaxed))
.min_by(|a, b| a.cmp(b))
.unwrap();

if lowest_timer == u64::MAX {
set_oneshot_timer(None);
} else {
set_oneshot_timer(Some(lowest_timer));
}
}

/// Clears the timer slot for the currently active timer.
pub fn clear_active() {
// SAFETY: There's more than 1 slot
let lowest_timer = TIMERS
.iter()
.min_by(|a, b| {
a.wakeup_time
.load(Ordering::Relaxed)
.cmp(&b.wakeup_time.load(Ordering::Relaxed))
})
.unwrap();

// TODO: Do we really want to do this here?
match lowest_timer.source {
#[cfg(feature = "net")]
Source::Network => NETWORK_WAKER.lock().wake(),
_ => {} // no-op, we always poll after a timer interrupt
}

trace!("Cleared active timer");

lowest_timer.wakeup_time.store(u64::MAX, Ordering::Relaxed);
}
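
The new module multiplexes a single hardware oneshot timer across the two logical sources: each `create_timer*` call overwrites its source's slot and only re-programs the hardware timer when the new deadline is the earliest one pending, while `clear_active` and `set_next_timer` retire the slot that fired and re-arm for the next deadline. A minimal caller-side sketch (the function name and the 1 ms value are illustrative, not taken from this PR):

```rust
use crate::arch::timer_interrupts::{self, Source};

/// Illustrative caller: arm a scheduler wakeup 1 ms (1000 µs) from now. The
/// `Source::Scheduler` slot is overwritten; the hardware oneshot timer is only
/// re-programmed if this deadline is the earliest one pending.
fn arm_scheduler_wakeup_in_1ms() {
    timer_interrupts::create_timer_abs(
        Source::Scheduler,
        crate::arch::processor::get_timer_ticks() + 1_000,
    );
}
```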
7 changes: 6 additions & 1 deletion src/arch/x86_64/kernel/scheduler.rs
@@ -14,10 +14,10 @@ use crate::arch::x86_64::mm::paging::{
BasePageSize, PageSize, PageTableEntryFlags, PageTableEntryFlagsExt,
};
use crate::config::*;
use crate::env;
use crate::mm::{FrameAlloc, PageAlloc, PageRangeAllocator};
use crate::scheduler::PerCoreSchedulerExt;
use crate::scheduler::task::{Task, TaskFrame};
use crate::{env, timer_interrupts};

#[repr(C, packed)]
struct State {
@@ -318,6 +318,11 @@ impl TaskFrame for Task {

extern "x86-interrupt" fn timer_handler(_stack_frame: interrupts::ExceptionStackFrame) {
increment_irq_counter(apic::TIMER_INTERRUPT_NUMBER);

debug!("Handle timer interrupt");
timer_interrupts::clear_active();
timer_interrupts::set_next_timer();

core_scheduler().handle_waiting_tasks();
apic::eoi();
core_scheduler().reschedule();
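
With this hunk, all three architectures' timer handlers share the same two-step epilogue before their arch-specific scheduling work. A condensed sketch of the common shape (the function name here is illustrative; the surrounding bookkeeping differs per architecture):

```rust
use crate::arch::timer_interrupts;

/// Common shape of the per-architecture timer interrupt handlers in this PR.
fn timer_handler_epilogue() {
    // Retire the slot whose deadline fired (waking the network stack if it was
    // the network slot) and re-arm the hardware timer for the next pending
    // deadline, if any.
    timer_interrupts::clear_active();
    timer_interrupts::set_next_timer();
    // Arch-specific follow-up (see the hunks above): wake waiting tasks and
    // reschedule.
}
```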
4 changes: 4 additions & 0 deletions src/drivers/net/gem.rs
@@ -32,6 +32,7 @@ use crate::drivers::net::{NetworkDriver, mtu};
#[cfg(feature = "pci")]
use crate::drivers::pci as hardware;
use crate::drivers::{Driver, InterruptLine};
use crate::executor::network::NETWORK_WAKER;
use crate::mm::device_alloc::DeviceAlloc;
use crate::{BasePageSize, PageSize};

@@ -276,6 +277,9 @@ impl NetworkDriver for GEMDriver {

fn handle_interrupt(&mut self) {
self.tx_fields.handle_interrupt();

trace!("Waking network waker");
NETWORK_WAKER.lock().wake();
}
}

4 changes: 3 additions & 1 deletion src/drivers/net/loopback.rs
@@ -8,6 +8,7 @@ use smoltcp::time::Instant;

use crate::drivers::net::NetworkDriver;
use crate::drivers::{Driver, InterruptLine};
use crate::executor::network::NETWORK_WAKER;
use crate::mm::device_alloc::DeviceAlloc;

pub(crate) struct LoopbackDriver {
@@ -122,7 +123,8 @@ impl NetworkDriver for LoopbackDriver {
}

fn handle_interrupt(&mut self) {
// no-op
trace!("Waking network waker");
NETWORK_WAKER.lock().wake();
}
}

4 changes: 4 additions & 0 deletions src/drivers/net/rtl8139.rs
@@ -21,6 +21,7 @@ use crate::drivers::Driver;
use crate::drivers::error::DriverError;
use crate::drivers::net::{NetworkDriver, mtu};
use crate::drivers::pci::PciDevice;
use crate::executor::network::NETWORK_WAKER;
use crate::mm::device_alloc::DeviceAlloc;

/// size of the receive buffer
@@ -687,6 +688,9 @@ impl NetworkDriver for RTL8139Driver {
self.regs.as_mut_ptr().isr().write(le16::from(
isr_contents & (ISR_RXOVW | ISR_TER | ISR_RER | ISR_TOK | ISR_ROK),
));

trace!("Waking network waker");
NETWORK_WAKER.lock().wake();
}
}

4 changes: 4 additions & 0 deletions src/drivers/net/virtio/mod.rs
@@ -41,6 +41,7 @@ use crate::drivers::virtio::virtqueue::{
AvailBufferToken, BufferElem, BufferType, UsedBufferToken, VirtQueue, Virtq,
};
use crate::drivers::{Driver, InterruptLine};
use crate::executor::network::NETWORK_WAKER;
use crate::mm::device_alloc::DeviceAlloc;

/// A wrapper struct for the raw configuration structure.
@@ -416,6 +417,9 @@ impl NetworkDriver for VirtioNetDriver<Init> {
}

self.isr_stat.acknowledge();

trace!("Waking network waker");
NETWORK_WAKER.lock().wake();
}
}

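
Every network driver's `handle_interrupt` now ends by waking `NETWORK_WAKER`, so the network future is polled again as soon as the device raises an interrupt instead of depending on the executor's polling-mode bookkeeping that the next chunk removes. A hedged sketch of the other half of that hand-off, assuming `NETWORK_WAKER` is a lock around the smoltcp-derived `WakerRegistration` mentioned in `src/executor/mod.rs` (its definition is not part of this excerpt):

```rust
use core::task::{Context, Poll};

use crate::executor::network::NETWORK_WAKER;

fn poll_network_future(cx: &mut Context<'_>) -> Poll<()> {
    // The network future parks itself by registering its waker ...
    NETWORK_WAKER.lock().register(cx.waker());
    // ... and the drivers' handle_interrupt() implementations call wake() on
    // it, as in the hunks above, which reschedules the future.
    Poll::Pending
}
```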
96 changes: 9 additions & 87 deletions src/executor/mod.rs
@@ -18,15 +18,11 @@ use core::time::Duration;

use crossbeam_utils::Backoff;
use hermit_sync::without_interrupts;
#[cfg(feature = "net")]
use smoltcp::time::Instant;

use crate::arch::core_local;
use crate::errno::Errno;
use crate::executor::task::AsyncTask;
use crate::io;
#[cfg(feature = "net")]
use crate::scheduler::PerCoreSchedulerExt;
use crate::synch::futex::*;

/// WakerRegistration is derived from smoltcp's
@@ -155,101 +151,27 @@ where

let now = crate::arch::kernel::systemtime::now_micros();
if let Poll::Ready(t) = result {
// allow network interrupts
#[cfg(feature = "net")]
{
if let Some(mut guard) = crate::executor::network::NIC.try_lock() {
let delay = if let Ok(nic) = guard.as_nic_mut() {
nic.set_polling_mode(false);

nic.poll_delay(Instant::from_micros_const(now.try_into().unwrap()))
.map(|d| d.total_micros())
} else {
None
};
core_local::core_scheduler().add_network_timer(
delay.map(|d| crate::arch::processor::get_timer_ticks() + d),
);
}
}

return t;
}

if let Some(duration) = timeout
&& Duration::from_micros(now - start) >= duration
{
// allow network interrupts
#[cfg(feature = "net")]
{
if let Some(mut guard) = crate::executor::network::NIC.try_lock() {
let delay = if let Ok(nic) = guard.as_nic_mut() {
nic.set_polling_mode(false);

nic.poll_delay(Instant::from_micros_const(now.try_into().unwrap()))
.map(|d| d.total_micros())
} else {
None
};
core_local::core_scheduler().add_network_timer(
delay.map(|d| crate::arch::processor::get_timer_ticks() + d),
);
}
}

return Err(Errno::Time);
}

#[cfg(feature = "net")]
// TODO: I have no idea whether this is correct
if backoff.is_completed() {
let delay = if let Some(mut guard) = crate::executor::network::NIC.try_lock() {
if let Ok(nic) = guard.as_nic_mut() {
nic.set_polling_mode(false);

nic.poll_delay(Instant::from_micros_const(now.try_into().unwrap()))
.map(|d| d.total_micros())
} else {
None
}
} else {
None
};

if delay.unwrap_or(10_000_000) > 10_000 {
core_local::core_scheduler().add_network_timer(
delay.map(|d| crate::arch::processor::get_timer_ticks() + d),
);
let wakeup_time =
timeout.map(|duration| start + u64::try_from(duration.as_micros()).unwrap());

// switch to another task
task_notify.wait(wakeup_time);

// restore default values
if let Ok(nic) = crate::executor::network::NIC.lock().as_nic_mut() {
nic.set_polling_mode(true);
}

backoff.reset();
}
} else {
backoff.snooze();
}

#[cfg(not(feature = "net"))]
{
if backoff.is_completed() {
let wakeup_time =
timeout.map(|duration| start + u64::try_from(duration.as_micros()).unwrap());
let wakeup_time =
timeout.map(|duration| start + u64::try_from(duration.as_micros()).unwrap());

// switch to another task
task_notify.wait(wakeup_time);
// switch to another task
task_notify.wait(wakeup_time);

// restore default values
backoff.reset();
} else {
backoff.snooze();
}
// restore default values
backoff.reset();
} else {
backoff.snooze();
}
}
}
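
Not shown in this excerpt is who now consumes smoltcp's `poll_delay`: with the polling-mode toggling and `add_network_timer` calls gone from `block_on`/`poll_on`, the delay is presumably registered through the new `create_timer(Source::Network, …)` entry point, which is gated on the `net` feature. A hedged sketch of what such a call site could look like (the function name is illustrative, not from this diff):

```rust
use crate::arch::timer_interrupts::{self, Source};

/// Illustrative only: forward smoltcp's next poll delay (in microseconds) to
/// the shared timer list; this overrides any previously pending network timer.
fn schedule_next_network_poll(poll_delay_micros: Option<u64>) {
    if let Some(delay) = poll_delay_micros {
        timer_interrupts::create_timer(Source::Network, delay);
    }
}
```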