
Commit

Merge b225efc on remote branch
Change-Id: Iecaef8945d848f13b1575bd9d5e31bb07de7c307
Linux Build Service Account committed Aug 1, 2019
2 parents 7635e45 + b225efc commit c41bda2
Showing 56 changed files with 872 additions and 312 deletions.
18 changes: 8 additions & 10 deletions drivers/android/binder_alloc.c
@@ -950,14 +950,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,

index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}

list_lru_isolate(lru, item);
spin_unlock(lock);
Expand All @@ -970,10 +969,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
PAGE_SIZE);

trace_binder_unmap_user_end(alloc, index);

up_write(&mm->mmap_sem);
mmput(mm);
}
up_write(&mm->mmap_sem);
mmput(mm);

trace_binder_unmap_kernel_start(alloc, index);
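
The reordered locking above pins the mm and takes mmap_sem before the VMA lookup, so a concurrent munmap() can no longer tear the VMA down between binder_alloc_get_vma() and zap_page_range(). A minimal sketch of that take-the-locks-first pattern, assuming a kernel of this vintage where the lock is still named mmap_sem; the helper below is illustrative, not the driver's code:

#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Sketch: pin the mm and take mmap_sem *before* dereferencing any VMA
 * obtained from it, so a concurrent munmap() cannot free the VMA
 * between lookup and use.
 */
static bool reclaim_one_page(struct mm_struct *mm, unsigned long addr)
{
    struct vm_area_struct *vma;

    if (!mmget_not_zero(mm))                    /* mm may already be exiting */
        return false;
    if (!down_write_trylock(&mm->mmap_sem)) {   /* never block reclaim */
        mmput(mm);
        return false;
    }

    vma = find_vma(mm, addr);                   /* safe: mmap_sem is held */
    if (vma && vma->vm_start <= addr)
        zap_page_range(vma, addr, PAGE_SIZE);

    up_write(&mm->mmap_sem);
    mmput(mm);
    return true;
}

Using down_write_trylock() rather than down_write() keeps the shrinker callback from blocking behind a long-running mmap_sem holder; on contention the page is simply skipped and retried later.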

2 changes: 1 addition & 1 deletion drivers/char/adsprpc.c
@@ -2844,7 +2844,7 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
context_notify_user(me->ctxtable[index], rsp->retval);
bail:
if (err)
pr_err("adsprpc: invalid response or context\n");
pr_debug("adsprpc: invalid response or context\n");
return err;
}
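
Demoting this message from pr_err() to pr_debug() keeps a late or spurious RPC response from flooding the kernel log. On kernels built with CONFIG_DYNAMIC_DEBUG it can still be re-enabled at runtime, e.g. with echo 'file adsprpc.c +p' > /sys/kernel/debug/dynamic_debug/control, assuming debugfs is mounted at its usual path.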

28 changes: 18 additions & 10 deletions drivers/clk/qcom/clk-branch.c
@@ -125,6 +125,12 @@ static int clk_branch_toggle(struct clk_hw *hw, bool en,
clk_disable_regmap(hw);
}

/*
* Make sure enable/disable request goes through before waiting
* for CLK_OFF status to get updated.
*/
mb();

return clk_branch_wait(br, en, check_halt);
}

Expand All @@ -136,34 +142,36 @@ static int clk_branch_enable(struct clk_hw *hw)
static int clk_cbcr_set_flags(struct regmap *regmap, unsigned int reg,
unsigned long flags)
{
u32 cbcr_val;

regmap_read(regmap, reg, &cbcr_val);
u32 cbcr_val = 0;
u32 cbcr_mask;
int ret;

switch (flags) {
case CLKFLAG_PERIPH_OFF_SET:
cbcr_val |= BIT(12);
cbcr_val = cbcr_mask = BIT(12);
break;
case CLKFLAG_PERIPH_OFF_CLEAR:
cbcr_val &= ~BIT(12);
cbcr_mask = BIT(12);
break;
case CLKFLAG_RETAIN_PERIPH:
cbcr_val |= BIT(13);
cbcr_val = cbcr_mask = BIT(13);
break;
case CLKFLAG_NORETAIN_PERIPH:
cbcr_val &= ~BIT(13);
cbcr_mask = BIT(13);
break;
case CLKFLAG_RETAIN_MEM:
cbcr_val |= BIT(14);
cbcr_val = cbcr_mask = BIT(14);
break;
case CLKFLAG_NORETAIN_MEM:
cbcr_val &= ~BIT(14);
cbcr_mask = BIT(14);
break;
default:
return -EINVAL;
}

regmap_write(regmap, reg, cbcr_val);
ret = regmap_update_bits(regmap, reg, cbcr_mask, cbcr_val);
if (ret)
return ret;

/* Make sure power is enabled/disabled before returning. */
mb();
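
The rewritten clk_cbcr_set_flags() drops the open-coded read-modify-write (a regmap_read() whose result was never checked, followed by a full regmap_write()) in favour of regmap_update_bits(), which does the read-modify-write under regmap's own locking, can only change the masked bit, and propagates errors. A hedged sketch of the difference; the register offset and bit below are illustrative, not the real CBCR layout:

#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define EXAMPLE_CBCR_REG   0x1000      /* illustrative offset */
#define EXAMPLE_RETAIN_MEM BIT(14)     /* illustrative flag bit */

/* Old shape: lossy. If the read fails, val is garbage, and the write
 * clobbers every other bit with whatever happened to be read back. */
static int set_retain_mem_old(struct regmap *map)
{
    u32 val;

    regmap_read(map, EXAMPLE_CBCR_REG, &val);
    val |= EXAMPLE_RETAIN_MEM;
    return regmap_write(map, EXAMPLE_CBCR_REG, val);
}

/* New shape: regmap performs the locked read-modify-write internally,
 * only the masked bit can change, and failures are reported. */
static int set_retain_mem_new(struct regmap *map, bool enable)
{
    return regmap_update_bits(map, EXAMPLE_CBCR_REG, EXAMPLE_RETAIN_MEM,
                              enable ? EXAMPLE_RETAIN_MEM : 0);
}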
55 changes: 51 additions & 4 deletions drivers/cpuidle/lpm-levels.c
@@ -1,4 +1,4 @@
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2006-2007 Adam Belay <[email protected]>
* Copyright (C) 2009 Intel Corporation
*
@@ -109,6 +109,7 @@ static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static DEFINE_PER_CPU(struct hrtimer, histtimer);
static DEFINE_PER_CPU(struct hrtimer, biastimer);
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
@@ -435,6 +436,34 @@ static void msm_pm_set_timer(uint32_t modified_time_us)
hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

static void biastimer_cancel(void)
{
unsigned int cpu = raw_smp_processor_id();
struct hrtimer *cpu_biastimer = &per_cpu(biastimer, cpu);
ktime_t time_rem;

time_rem = hrtimer_get_remaining(cpu_biastimer);
if (ktime_to_us(time_rem) <= 0)
return;

hrtimer_try_to_cancel(cpu_biastimer);
}

static enum hrtimer_restart biastimer_fn(struct hrtimer *h)
{
return HRTIMER_NORESTART;
}

static void biastimer_start(uint32_t time_ns)
{
ktime_t bias_ktime = ns_to_ktime(time_ns);
unsigned int cpu = raw_smp_processor_id();
struct hrtimer *cpu_biastimer = &per_cpu(biastimer, cpu);

cpu_biastimer->function = biastimer_fn;
hrtimer_start(cpu_biastimer, bias_ktime, HRTIMER_MODE_REL_PINNED);
}

static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
struct lpm_cpu *cpu, int *idx_restrict,
uint32_t *idx_restrict_time)
@@ -595,15 +624,22 @@ static void clear_predict_history(void)

static void update_history(struct cpuidle_device *dev, int idx);

static inline bool is_cpu_biased(int cpu)
static inline bool is_cpu_biased(int cpu, uint64_t *bias_time)
{
u64 now = sched_clock();
u64 last = sched_get_cpu_last_busy_time(cpu);
u64 diff = 0;

if (!last)
return false;

return (now - last) < BIAS_HYST;
diff = now - last;
if (diff < BIAS_HYST) {
*bias_time = BIAS_HYST - diff;
return true;
}

return false;
}

static int cpu_power_select(struct cpuidle_device *dev,
@@ -623,6 +659,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
uint32_t next_wakeup_us = (uint32_t)sleep_us;
uint32_t min_residency, max_residency;
struct power_params *pwr_params;
uint64_t bias_time = 0;

if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
return best_level;
Expand All @@ -631,8 +668,10 @@ static int cpu_power_select(struct cpuidle_device *dev,

next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));

if (is_cpu_biased(dev->cpu) && (!cpu_isolated(dev->cpu)))
if (is_cpu_biased(dev->cpu, &bias_time) && (!cpu_isolated(dev->cpu))) {
cpu->bias = bias_time;
goto done_select;
}

for (i = 0; i < cpu->nlevels; i++) {
bool allow;
@@ -1310,6 +1349,8 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
*/

if (!idx) {
if (cpu->bias)
biastimer_start(cpu->bias);
stop_critical_timings();
wfi();
start_critical_timings();
@@ -1420,6 +1461,10 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
histtimer_cancel();
clusttimer_cancel();
}
if (cpu->bias) {
biastimer_cancel();
cpu->bias = 0;
}
local_irq_enable();
return idx;
}
@@ -1723,6 +1768,8 @@ static int lpm_probe(struct platform_device *pdev)
for_each_possible_cpu(cpu) {
cpu_histtimer = &per_cpu(histtimer, cpu);
hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cpu_histtimer = &per_cpu(biastimer, cpu);
hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}

cluster_timer_init(lpm_root_node);
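
The new bias timer gives a CPU that falls back to plain WFI (because it was busy within the last BIAS_HYST) a guaranteed wakeup once the remaining bias window, BIAS_HYST minus the time since it was last busy, expires. A standalone sketch of the same per-CPU pinned-hrtimer pattern, using illustrative names rather than the driver's:

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct hrtimer, wake_bias_timer);

static enum hrtimer_restart wake_bias_fn(struct hrtimer *h)
{
    /* Nothing to do here: the timer interrupt itself is the wakeup. */
    return HRTIMER_NORESTART;
}

static void wake_bias_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        struct hrtimer *t = &per_cpu(wake_bias_timer, cpu);

        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t->function = wake_bias_fn;
    }
}

static void wake_bias_arm(u64 remaining_ns)
{
    /* REL_PINNED keeps the timer, and hence the wakeup, on this CPU. */
    hrtimer_start(this_cpu_ptr(&wake_bias_timer), ns_to_ktime(remaining_ns),
                  HRTIMER_MODE_REL_PINNED);
}

static void wake_bias_cancel(void)
{
    struct hrtimer *t = this_cpu_ptr(&wake_bias_timer);

    if (ktime_to_us(hrtimer_get_remaining(t)) > 0)
        hrtimer_try_to_cancel(t);
}

The driver assigns the callback each time it starts the timer, while the sketch sets it once at init; either works as long as the function pointer is set before hrtimer_start().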
3 changes: 2 additions & 1 deletion drivers/cpuidle/lpm-levels.h
@@ -1,4 +1,4 @@
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@ struct lpm_cpu {
uint32_t ref_premature_cnt;
uint32_t tmr_add;
bool lpm_prediction;
uint64_t bias;
struct cpuidle_driver *drv;
struct lpm_cluster *parent;
};
5 changes: 4 additions & 1 deletion drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -749,7 +749,10 @@ static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp)
goto exit;
}

out_msg = (u32)hdcp->app_data.response.data[0];
if (hdcp->app_data.response.length == 0)
out_msg = INVALID_MESSAGE;
else
out_msg = (u32)hdcp->app_data.response.data[0];

pr_debug("message received from TZ: %s\n",
sde_hdcp_2x_message_name(out_msg));
6 changes: 6 additions & 0 deletions drivers/gpu/msm/adreno.c
@@ -3341,6 +3341,12 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
return 0;

for (i = 0; i < GMU_CORE_LONG_WAKEUP_RETRY_LIMIT; i++) {
/*
* Make sure the previous register write is posted before
* checking the fence status
*/
mb();

adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
&status);
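
The added mb() orders the fenced register write against the status read in each retry iteration, so the loop polls what the hardware actually latched instead of a value still sitting in a CPU write buffer. A generic hedged sketch of the write-then-poll idiom; the registers, status bit, and retry budget below are invented for illustration:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_FENCE_BUSY    BIT(0)   /* illustrative status bit */
#define EXAMPLE_RETRY_LIMIT   100

/* Write a device register, then poll a status register until the
 * hardware reports the write went through (or give up). */
static int fenced_write_poll(void __iomem *reg, void __iomem *status_reg, u32 val)
{
    unsigned int i;

    writel_relaxed(val, reg);

    for (i = 0; i < EXAMPLE_RETRY_LIMIT; i++) {
        /* Make sure the write above is posted before reading status. */
        mb();

        if (!(readl_relaxed(status_reg) & EXAMPLE_FENCE_BUSY))
            return 0;

        udelay(10);
    }

    return -ETIMEDOUT;
}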

39 changes: 20 additions & 19 deletions drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -223,8 +223,13 @@ static void drawobj_sync_func(struct kgsl_device *device,
trace_syncpoint_timestamp_expire(event->syncobj,
event->context, event->timestamp);

drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
/*
* Put down the context ref count only if
* this thread successfully clears the pending bit mask.
*/
if (drawobj_sync_expire(device, event))
kgsl_context_put(event->context);

kgsl_drawobj_put(&event->syncobj->base);
}

@@ -254,40 +259,36 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
unsigned long pending = 0;
unsigned int i;

/* Zap the canary timer */
del_timer_sync(&syncobj->timer);

/*
* Copy off the pending list and clear each pending event atomically -
* this will render any subsequent asynchronous callback harmless.
* This marks each event for deletion. If any pending fence callbacks
* run between now and the actual cancel, the associated structures
* are kfreed only in the cancel call.
*/
for_each_set_bit(i, &syncobj->pending, KGSL_MAX_SYNCPOINTS) {
if (test_and_clear_bit(i, &syncobj->pending))
__set_bit(i, &pending);
}

/*
* Clear all pending events - this will render any subsequent async
* callbacks harmless
*/
for (i = 0; i < syncobj->numsyncs; i++) {
struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];

/* Don't do anything if the event has already expired */
if (!test_bit(i, &pending))
/*
* Don't do anything if the event has already expired.
* If this thread clears the pending bit mask then it is
* responsible for doing context put.
*/
if (!test_and_clear_bit(i, &syncobj->pending))
continue;

switch (event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
kgsl_cancel_event(drawobj->device,
&event->context->events, event->timestamp,
drawobj_sync_func, event);
/*
* Do context put here to make sure the context is alive
* till this thread cancels kgsl event.
*/
kgsl_context_put(event->context);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
kgsl_sync_fence_async_cancel(event->handle);
Expand All @@ -300,7 +301,7 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
* If we cancelled an event, there's a good chance that the context is
* on a dispatcher queue, so schedule to get it removed.
*/
if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
if (!bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS) &&
drawobj->device->ftbl->drawctxt_sched)
drawobj->device->ftbl->drawctxt_sched(drawobj->device,
drawobj->context);
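
Both the asynchronous sync callback and drawobj_destroy_sync() can now race on the same syncpoint, and test_and_clear_bit() on the pending mask decides which path owns the final kgsl_context_put(). A reduced sketch of that ownership handoff, using illustrative types rather than the KGSL ones:

#include <linux/bitops.h>
#include <linux/kref.h>

struct sync_obj_sketch {
    unsigned long pending;      /* one bit per syncpoint */
    struct kref *context_ref;   /* held while the point is pending */
};

/*
 * Returns true only for the single caller that actually cleared the
 * bit; that caller, and no one else, drops the context reference.
 */
static bool retire_syncpoint(struct sync_obj_sketch *obj, unsigned int bit,
                             void (*release)(struct kref *))
{
    if (!test_and_clear_bit(bit, &obj->pending))
        return false;           /* the other path already retired it */

    kref_put(obj->context_ref, release);
    return true;
}

Whichever of the callback and the cancel path clears the bit performs the put; the loser sees the bit already clear and skips it, so the context reference is dropped exactly once.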
16 changes: 13 additions & 3 deletions drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -980,8 +980,10 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
return -EINVAL;
}

if (!CAM_CPAS_CLIENT_VALID(client_indx))
if (!CAM_CPAS_CLIENT_VALID(client_indx)) {
CAM_ERR(CAM_CPAS, "Client index invalid %d", client_indx);
return -EINVAL;
}

mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
@@ -1099,8 +1101,10 @@ static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);

if (!CAM_CPAS_CLIENT_VALID(client_indx))
if (!CAM_CPAS_CLIENT_VALID(client_indx)) {
CAM_ERR(CAM_CPAS, "Client index invalid %d", client_indx);
return -EINVAL;
}

mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
@@ -1162,14 +1166,20 @@ static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
ahb_vote.vote.level = CAM_SUSPEND_VOTE;
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
&ahb_vote, NULL);
if (rc)
if (rc) {
CAM_ERR(CAM_CPAS, "ahb vote failed for %s rc %d",
cpas_client->data.identifier, rc);
goto done;
}

axi_vote.uncompressed_bw = 0;
axi_vote.compressed_bw = 0;
axi_vote.compressed_bw_ab = 0;
rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
cpas_client, &axi_vote);
if (rc)
CAM_ERR(CAM_CPAS, "axi vote failed for %s rc %d",
cpas_client->data.identifier, rc);

done:
mutex_unlock(&cpas_core->client_mutex[client_indx]);
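
The stop path now reports a failed AHB vote and still funnels through a single unlock label, and only logs (without aborting) when the follow-up AXI vote fails. A compact sketch of that validate-lock-vote-unwind shape, using hypothetical callbacks rather than the CPAS helpers:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/printk.h>

/* Sketch only: validate before locking, take locks in a fixed order,
 * and route every failure through one unlock path. */
static int client_stop_sketch(struct mutex *hw_lock, struct mutex *client_lock,
                              int client_idx, int num_clients,
                              int (*drop_ahb_vote)(int idx),
                              int (*drop_axi_vote)(int idx))
{
    int rc;

    if (client_idx < 0 || client_idx >= num_clients) {
        pr_err("client index invalid %d\n", client_idx);
        return -EINVAL;                 /* nothing locked yet */
    }

    mutex_lock(hw_lock);
    mutex_lock(client_lock);

    rc = drop_ahb_vote(client_idx);
    if (rc) {
        pr_err("ahb vote failed for client %d rc %d\n", client_idx, rc);
        goto done;                      /* unlock in reverse order */
    }

    rc = drop_axi_vote(client_idx);
    if (rc)
        pr_err("axi vote failed for client %d rc %d\n", client_idx, rc);

done:
    mutex_unlock(client_lock);
    mutex_unlock(hw_lock);
    return rc;
}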
