Implemented memory and CPU limits for LCOW.
Signed-off-by: Yusuf Tarık Günaydın <[email protected]>
notanaverageman committed Feb 2, 2019
1 parent 50e63ad commit 86bd2e9
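With this change, the standard resource flags should be honored for Linux containers on Windows (LCOW) as well, so an invocation along the lines of docker run --cpus 2.5 --memory 512m alpine (illustrative image and values) would now flow through the limit plumbing shown below.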
Showing 2 changed files with 80 additions and 64 deletions.
85 changes: 46 additions & 39 deletions daemon/oci_windows.go
@@ -250,45 +250,7 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S
	// First boot optimization
	s.Windows.IgnoreFlushesDuringBoot = !c.HasBeenStartedBefore

	// In s.Windows.Resources
	cpuShares := uint16(c.HostConfig.CPUShares)
	cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100
	cpuCount := uint64(c.HostConfig.CPUCount)
	if c.HostConfig.NanoCPUs > 0 {
		if isHyperV {
			cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9)
			leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9
			if leftoverNanoCPUs != 0 {
				cpuCount++
				cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000))
				if cpuMaximum < 1 {
					// The requested NanoCPUs is so small that we rounded to 0, use 1 instead
					cpuMaximum = 1
				}
			}
		} else {
			cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000))
			if cpuMaximum < 1 {
				// The requested NanoCPUs is so small that we rounded to 0, use 1 instead
				cpuMaximum = 1
			}
		}
	}
	memoryLimit := uint64(c.HostConfig.Memory)
	s.Windows.Resources = &specs.WindowsResources{
		CPU: &specs.WindowsCPUResources{
			Maximum: &cpuMaximum,
			Shares:  &cpuShares,
			Count:   &cpuCount,
		},
		Memory: &specs.WindowsMemoryResources{
			Limit: &memoryLimit,
		},
		Storage: &specs.WindowsStorageResources{
			Bps:  &c.HostConfig.IOMaximumBandwidth,
			Iops: &c.HostConfig.IOMaximumIOps,
		},
	}
	setResourcesInSpec(c, s, isHyperV)

	// Read and add credentials from the security options if a credential spec has been provided.
	if c.HostConfig.SecurityOpt != nil {
@@ -369,6 +331,9 @@ func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spe
	}
	s.Root.Path = "rootfs"
	s.Root.Readonly = c.HostConfig.ReadonlyRootfs

	setResourcesInSpec(c, s, true) // LCOW is Hyper-V only

	capabilities, err := caps.TweakCapabilities(oci.DefaultCapabilities(), c.HostConfig.CapAdd, c.HostConfig.CapDrop, c.HostConfig.Capabilities, c.HostConfig.Privileged)
	if err != nil {
		return fmt.Errorf("linux spec capabilities: %v", err)
@@ -384,6 +349,48 @@ func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spe
	return nil
}

func setResourcesInSpec(c *container.Container, s *specs.Spec, isHyperV bool) {
	// In s.Windows.Resources
	cpuShares := uint16(c.HostConfig.CPUShares)
	cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100
	cpuCount := uint64(c.HostConfig.CPUCount)
	if c.HostConfig.NanoCPUs > 0 {
		if isHyperV {
			cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9)
			leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9
			if leftoverNanoCPUs != 0 {
				cpuCount++
				cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000))
				if cpuMaximum < 1 {
					// The requested NanoCPUs is so small that we rounded to 0, use 1 instead
					cpuMaximum = 1
				}
			}
		} else {
			cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000))
			if cpuMaximum < 1 {
				// The requested NanoCPUs is so small that we rounded to 0, use 1 instead
				cpuMaximum = 1
			}
		}
	}
	memoryLimit := uint64(c.HostConfig.Memory)
	s.Windows.Resources = &specs.WindowsResources{
		CPU: &specs.WindowsCPUResources{
			Maximum: &cpuMaximum,
			Shares:  &cpuShares,
			Count:   &cpuCount,
		},
		Memory: &specs.WindowsMemoryResources{
			Limit: &memoryLimit,
		},
		Storage: &specs.WindowsStorageResources{
			Bps:  &c.HostConfig.IOMaximumBandwidth,
			Iops: &c.HostConfig.IOMaximumIOps,
		},
	}
}
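A note on the NanoCPUs arithmetic above: the request arrives in billionths of a CPU, is rounded up to a whole processor count on the Hyper-V path, and the remainder becomes a per-processor cap in units of 1/100th of a percent (0-10000). The following standalone sketch is not part of the commit; the helper name and the 2.5-CPU input are made up for illustration.

package main

import "fmt"

// nanoCPUsToHyperVLimits mirrors the isHyperV branch above: NanoCPUs
// (1e9 units per CPU) is rounded up to a whole processor count, and the
// remainder is spread across those processors as a cap expressed in
// 1/100ths of a percent. A leftover of 0 means a whole number of CPUs
// was requested and no cap is needed.
func nanoCPUsToHyperVLimits(nanoCPUs int64) (cpuCount uint64, cpuMaximum uint16) {
	cpuCount = uint64(nanoCPUs / 1e9)
	if leftoverNanoCPUs := nanoCPUs % 1e9; leftoverNanoCPUs != 0 {
		cpuCount++
		cpuMaximum = uint16(nanoCPUs / int64(cpuCount) / (1e9 / 10000))
		if cpuMaximum < 1 {
			// The request was so small that it rounded to 0; use 1 instead.
			cpuMaximum = 1
		}
	}
	return cpuCount, cpuMaximum
}

func main() {
	// Illustrative input: 2.5 CPUs (e.g. --cpus 2.5) is 2500000000 NanoCPUs.
	count, max := nanoCPUsToHyperVLimits(2500000000)
	fmt.Println(count, max) // prints: 3 8333
}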

func escapeArgs(args []string) []string {
	escapedArgs := make([]string, len(args))
	for i, a := range args {
59 changes: 34 additions & 25 deletions libcontainerd/client_local_windows.go
@@ -176,32 +176,9 @@ func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions inter
		HvPartition: false,
	}

	c.extractResourcesFromSpec(spec, configuration)

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
@@ -417,6 +394,8 @@ func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interfa
return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
}

c.extractResourcesFromSpec(spec, configuration)

// We must have least one layer in the spec
if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 {
return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime")
@@ -598,6 +577,36 @@ func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interfa
	return nil
}

func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
	}
}
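On the memory side, the OCI spec carries the limit in bytes (copied from HostConfig.Memory), while hcsshim's ContainerConfig expects whole megabytes, hence the division by 1024 twice above. A small standalone illustration of that conversion; the helper name and sample value are not from the commit.

package main

import "fmt"

// bytesToMemoryMaximumInMB repeats the conversion used above when filling
// in configuration.MemoryMaximumInMB from the spec's byte-based limit.
func bytesToMemoryMaximumInMB(limitInBytes uint64) int64 {
	return int64(limitInBytes) / 1024 / 1024
}

func main() {
	// docker run --memory 512m stores 536870912 bytes in HostConfig.Memory.
	fmt.Println(bytesToMemoryMaximumInMB(536870912)) // prints: 512
}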

func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(id)
	switch {
