drm/i915: Introduce IS_GEN9_BC for Skylake and Kabylake.

Along with GLK, the .is_lp field and the IS_GEN9_LP macro were introduced.
Following the same simplification standard, we can put Skylake and
Kabylake in the same bucket for most things.

So let's add IS_GEN9_BC for "Big Core" (non-Atom-based) platforms.
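
For reference, the new check pairs with IS_GEN9_LP; the macro and a
typical caller conversion look like this (taken from the i915_drv.h and
intel_pm.c hunks below):

    #define IS_GEN9_LP(dev_priv)  (IS_GEN9(dev_priv) && IS_LP(dev_priv))
    #define IS_GEN9_BC(dev_priv)  (IS_GEN9(dev_priv) && !IS_LP(dev_priv))

    -	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
    +	if (IS_GEN9_BC(dev_priv))
    		gen6_update_ring_freq(dev_priv);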

i915_drv.c was left out of this patch on purpose, because that is really
a per-platform decision, just like other cases where IS_KABYLAKE differs
from IS_SKYLAKE.

v2: Fix conflict with IS_LP and convert 3 new cases to this
    big-core bucket:
    - intel_ddi.c: intel_ddi_get_link_dpll
    - intel_fbc.c: find_compression_threshold
    - i915_gem_gtt.c: gtt_write_workarounds

Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
Cc: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Ander Conselvan de Oliveira <conselvan2@gmail.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1485196357-30599-2-git-send-email-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 17
 drivers/gpu/drm/i915/i915_drv.h         |  3
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  2
 drivers/gpu/drm/i915/intel_audio.c      |  2
 drivers/gpu/drm/i915/intel_color.c      |  4
 drivers/gpu/drm/i915/intel_ddi.c        | 20
 drivers/gpu/drm/i915/intel_display.c    | 12
 drivers/gpu/drm/i915/intel_dp.c         |  5
 drivers/gpu/drm/i915/intel_dpll_mgr.c   |  2
 drivers/gpu/drm/i915/intel_fbc.c        |  3
 drivers/gpu/drm/i915/intel_i2c.c        |  4
 drivers/gpu/drm/i915/intel_mocs.c       |  2
 drivers/gpu/drm/i915/intel_pm.c         | 13
 drivers/gpu/drm/i915/intel_runtime_pm.c | 10
 14 files changed

--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1224,21 +1224,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-			     GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));

 		max_freq = (rp_state_cap & 0xff00) >> 8;
-		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-			     GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));

 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
-			     GEN9_FREQ_SCALER : 1);
+		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));

 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1814,7 +1811,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	if (ret)
 		goto out;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq =
 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1834,8 +1831,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
 			   intel_gpu_freq(dev_priv, (gpu_freq *
-						     (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
+						     (IS_GEN9_BC(dev_priv) ?
 						      GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
@@ -4450,7 +4447,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 		sseu->slice_mask |= BIT(s);

-		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		if (IS_GEN9_BC(dev_priv))
 			sseu->subslice_mask =
 				INTEL_INFO(dev_priv)->sseu.subslice_mask;

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2760,8 +2760,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
 #define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
+#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))

 #define ENGINE_MASK(id)	BIT(id)
 #define RENDER_RING	ENGINE_MASK(RCS)

--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2189,7 +2189,7 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
 	else if (IS_CHERRYVIEW(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	else if (IS_GEN9_BC(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
 	else if (IS_BROXTON(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);

--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -702,7 +702,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;

-	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+	if (!IS_GEN9_BC(dev_priv))
 		return;

 	i915_audio_component_get_power(kdev);

--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -536,8 +536,8 @@ void intel_color_init(struct drm_crtc *crtc)
 	} else if (IS_HASWELL(dev_priv)) {
 		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
 		dev_priv->display.load_luts = haswell_load_luts;
-	} else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
-		   IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
+		   IS_BROXTON(dev_priv)) {
 		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
 		dev_priv->display.load_luts = broadwell_load_luts;
 	} else {

--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -445,7 +445,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 	if (IS_GEN9_LP(dev_priv))
 		return hdmi_level;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
 		hdmi_default_entry = 8;
 	} else if (IS_BROADWELL(dev_priv)) {
@@ -518,7 +518,7 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 	}

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		/* If we're boosting the current, set bit 31 of trans1 */
 		if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
 			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
@@ -572,7 +572,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
 	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);

 		/* If we're boosting the current, set bit 31 of trans1 */
@@ -1089,7 +1089,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
 	if (INTEL_GEN(dev_priv) <= 8)
 		hsw_ddi_clock_get(encoder, pipe_config);
-	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	else if (IS_GEN9_BC(dev_priv))
 		skl_ddi_clock_get(encoder, pipe_config);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_ddi_clock_get(encoder, pipe_config);
@@ -1150,7 +1150,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 	struct intel_encoder *intel_encoder =
 		intel_ddi_get_crtc_new_encoder(crtc_state);

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		return skl_ddi_pll_select(intel_crtc, crtc_state,
 					  intel_encoder);
 	else if (IS_GEN9_LP(dev_priv))
@@ -1641,7 +1641,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 	level = translate_signal_level(signal_levels);

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
@@ -1658,7 +1658,7 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
 	if (WARN_ON(!pll))
 		return;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		uint32_t val;

 		/* DDI -> PLL mapping */
@@ -1714,7 +1714,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 	intel_ddi_clk_select(encoder, pll);
 	intel_prepare_hdmi_ddi_buffers(encoder);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_ddi_vswing_sequence(dev_priv, level, port,
@@ -1784,7 +1784,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
 		intel_edp_panel_off(intel_dp);
 	}

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
 					DPLL_CTRL2_DDI_CLK_OFF(port)));
 	else if (INTEL_GEN(dev_priv) < 9)
@@ -2157,7 +2157,7 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
 			pll->state = tmp_pll_state;
 			return NULL;
 		}
-	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	} else if (IS_GEN9_BC(dev_priv)) {
 		pll = skl_find_link_pll(dev_priv, clock);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		pll = hsw_ddi_dp_get_dpll(encoder, clock);

--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5800,7 +5800,7 @@ static int skl_calc_cdclk(int max_pixclk, int vco);
 static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 {
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 		int max_cdclk, vco;
@@ -10673,7 +10673,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@ -15681,7 +15681,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		 */
 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
 		/* WaIgnoreDDIAStrap: skl */
-		if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		if (found || IS_GEN9_BC(dev_priv))
 			intel_ddi_init(dev_priv, PORT_A);

 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15697,7 +15697,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		/*
 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
 		 */
-		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+		if (IS_GEN9_BC(dev_priv) &&
 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
@@ -16196,7 +16196,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 	}

 	/* Returns the core display clock speed */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			skylake_get_display_clock_speed;
 	else if (IS_GEN9_LP(dev_priv))
@@ -16277,7 +16277,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 			bxt_modeset_commit_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
 			bxt_modeset_calc_cdclk;
-	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	} else if (IS_GEN9_BC(dev_priv)) {
 		dev_priv->display.modeset_commit_cdclk =
 			skl_modeset_commit_cdclk;
 		dev_priv->display.modeset_calc_cdclk =

--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -226,7 +226,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
 	if (IS_GEN9_LP(dev_priv)) {
 		*source_rates = bxt_rates;
 		size = ARRAY_SIZE(bxt_rates);
-	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	} else if (IS_GEN9_BC(dev_priv)) {
 		*source_rates = skl_rates;
 		size = ARRAY_SIZE(skl_rates);
 	} else {
@@ -1751,8 +1751,7 @@ found:
 	 * DPLL0 VCO may need to be adjusted to get the correct
 	 * clock for eDP. This will affect cdclk as well.
 	 */
-	if (is_edp(intel_dp) &&
-	    (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
+	if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
 		int vco;

 		switch (pipe_config->port_clock / 2) {

--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2015,7 +2015,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	const struct dpll_info *dpll_info;
 	int i;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		dpll_mgr = &skl_pll_mgr;
 	else if (IS_GEN9_LP(dev_priv))
 		dpll_mgr = &bxt_pll_mgr;

--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -537,8 +537,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	 * reserved range size, so it always assumes the maximum (8mb) is used.
 	 * If we enable FBC using a CFB on that memory range we'll get FIFO
 	 * underruns, even if that range is not reserved by the BIOS. */
-	if (IS_BROADWELL(dev_priv) ||
-	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
 		end = ggtt->stolen_size - 8 * 1024 * 1024;
 	else
 		end = U64_MAX;

--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
 {
 	if (IS_GEN9_LP(dev_priv))
 		return &gmbus_pins_bxt[pin];
-	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	else if (IS_GEN9_BC(dev_priv))
 		return &gmbus_pins_skl[pin];
 	else if (IS_BROADWELL(dev_priv))
 		return &gmbus_pins_bdw[pin];
@@ -89,7 +89,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
 	if (IS_GEN9_LP(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_bxt);
-	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	else if (IS_GEN9_BC(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_skl);
 	else if (IS_BROADWELL(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_bdw);

--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -178,7 +178,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 {
 	bool result = false;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		table->size = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;

--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2895,8 +2895,7 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

-	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
-	    IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
 		return true;

 	return false;
@@ -5294,7 +5293,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
-	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	    IS_GEN9_BC(dev_priv)) {
 		u32 ddcc_status = 0;

 		if (sandybridge_pcode_read(dev_priv,
@@ -5307,7 +5306,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 			dev_priv->rps.max_freq);
 	}

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		/* Store the frequency values in 16.66 MHZ units, which is
 		 * the natural hardware unit for SKL
 		 */
@@ -5637,7 +5636,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
 		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5655,7 +5654,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 		int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;

-		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		if (IS_GEN9_BC(dev_priv)) {
 			/*
 			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
 			 * No floor required for ring frequency on SKL.
@@ -6775,7 +6774,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 	} else if (INTEL_GEN(dev_priv) >= 9) {
 		gen9_enable_rc6(dev_priv);
 		gen9_enable_rps(dev_priv);
-		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		if (IS_GEN9_BC(dev_priv))
 			gen6_update_ring_freq(dev_priv);
 	} else if (IS_BROADWELL(dev_priv)) {
 		gen8_enable_rps(dev_priv);

--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -732,7 +732,7 @@ static void gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
 	 * other request bits to be set, so WARN for those.
 	 */
 	if (power_well_id == SKL_DISP_PW_1 ||
-	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+	    (IS_GEN9_BC(dev_priv) &&
 	     power_well_id == SKL_DISP_PW_MISC_IO))
 		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
 				 "by DMC\n", power_well->name);
@@ -2323,7 +2323,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 	int requested_dc;
 	int max_dc;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		max_dc = 2;
 		mask = 0;
 	} else if (IS_GEN9_LP(dev_priv)) {
@@ -2398,7 +2398,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 		set_power_wells(power_domains, hsw_power_wells);
 	} else if (IS_BROADWELL(dev_priv)) {
 		set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	} else if (IS_GEN9_BC(dev_priv)) {
 		set_power_wells(power_domains, skl_power_wells);
 	} else if (IS_BROXTON(dev_priv)) {
 		set_power_wells(power_domains, bxt_power_wells);
@@ -2730,7 +2730,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 	power_domains->initializing = true;

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv)) {
 		skl_display_core_init(dev_priv, resume);
 	} else if (IS_GEN9_LP(dev_priv)) {
 		bxt_display_core_init(dev_priv, resume);
@@ -2769,7 +2769,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 	if (!i915.disable_power_well)
 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_GEN9_BC(dev_priv))
 		skl_display_core_uninit(dev_priv);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_display_core_uninit(dev_priv);
