drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c


DEFINITIONS

This source file includes the following definitions:
  1. nvkm_dp_train_sense
  2. nvkm_dp_train_drive
  3. nvkm_dp_train_pattern
  4. nvkm_dp_train_eq
  5. nvkm_dp_train_cr
  6. nvkm_dp_train_links
  7. nvkm_dp_train_fini
  8. nvkm_dp_train_init
  9. nvkm_dp_train
  10. nvkm_dp_disable
  11. nvkm_dp_release
  12. nvkm_dp_acquire
  13. nvkm_dp_enable
  14. nvkm_dp_hpd
  15. nvkm_dp_fini
  16. nvkm_dp_init
  17. nvkm_dp_dtor
  18. nvkm_dp_ctor
  19. nvkm_dp_new

/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

#include <nvif/event.h>

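/* Link training state shared by the helpers below.  Judging from the DPCD
 * offsets used in this file: stat[] shadows the six link/lane status bytes
 * read from DPCD_LS02 (0x202..0x207), conf[] holds the TRAINING_LANEx_SET
 * values written back via DPCD_LC03, and the pc2* fields carry the optional
 * post-cursor2 status/adjustment exchanged with TPS3-capable sinks.
 */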
struct lt_state {
        struct nvkm_dp *dp;
        u8  stat[6];
        u8  conf[4];
        bool pc2;
        u8  pc2stat;
        u8  pc2conf[2];
};

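/* Wait out the sink's required training delay, then sample its status.
 * This follows the DPCD TRAINING_AUX_RD_INTERVAL convention: a value of
 * zero selects the caller-supplied defaults (100us for clock recovery,
 * 400us for channel equalization), while a non-zero value means
 * "value * 4" milliseconds.  The six bytes starting at DPCD_LS02 cover
 * per-lane CR/EQ/symbol-lock status, inter-lane alignment, and the sink's
 * requested drive adjustments.
 */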
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
        struct nvkm_dp *dp = lt->dp;
        int ret;

        if (dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
                mdelay(dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
        else
                udelay(delay);

        ret = nvkm_rdaux(dp->aux, DPCD_LS02, lt->stat, 6);
        if (ret)
                return ret;

        if (pc) {
                ret = nvkm_rdaux(dp->aux, DPCD_LS0C, &lt->pc2stat, 1);
                if (ret)
                        lt->pc2stat = 0x00;
                OUTP_TRACE(&dp->outp, "status %6ph pc2 %02x",
                           lt->stat, lt->pc2stat);
        } else {
                OUTP_TRACE(&dp->outp, "status %6ph", lt->stat);
        }

        return 0;
}

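/* Apply the drive settings requested by the sink in ADJUST_REQUEST_LANEx.
 * Each lane's request is one nibble of stat[4]/stat[5] (DPCD 0x206/0x207):
 * bits 1:0 carry the requested voltage swing and bits 3:2 the pre-emphasis,
 * so lane 1, for instance, decodes as (stat[4] >> 4) & 0xf, which is the
 * general form the shift arithmetic below implements.  The levels are
 * clamped at their maximums, echoed back to the sink via TRAINING_LANEx_SET,
 * and translated into SOR drive parameters through the VBIOS DP tables.
 */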
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
        struct nvkm_dp *dp = lt->dp;
        struct nvkm_ior *ior = dp->outp.ior;
        struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
        struct nvbios_dpout info;
        struct nvbios_dpcfg ocfg;
        u8  ver, hdr, cnt, len;
        u32 data;
        int ret, i;

        for (i = 0; i < ior->dp.nr; i++) {
                u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
                u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
                u8 lpre = (lane & 0x0c) >> 2;
                u8 lvsw = (lane & 0x03) >> 0;
                u8 hivs = 3 - lpre;
                u8 hipe = 3;
                u8 hipc = 3;

                if (lpc2 >= hipc)
                        lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
                if (lpre >= hipe) {
                        lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
                        lvsw = hivs = 3 - (lpre & 3);
                } else
                if (lvsw >= hivs) {
                        lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
                }

                lt->conf[i] = (lpre << 3) | lvsw;
                lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

                OUTP_TRACE(&dp->outp, "config lane %d %02x %02x",
                           i, lt->conf[i], lpc2);

                data = nvbios_dpout_match(bios, dp->outp.info.hasht,
                                          dp->outp.info.hashm,
                                          &ver, &hdr, &cnt, &len, &info);
                if (!data)
                        continue;

                data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3,
                                          lpre & 3, &ver, &hdr, &cnt, &len,
                                          &ocfg);
                if (!data)
                        continue;

                ior->func->dp.drive(ior, i, ocfg.pc, ocfg.dc,
                                            ocfg.pe, ocfg.tx_pu);
        }

        ret = nvkm_wraux(dp->aux, DPCD_LC03(0), lt->conf, 4);
        if (ret)
                return ret;

        if (pc) {
                ret = nvkm_wraux(dp->aux, DPCD_LC0F, lt->pc2conf, 2);
                if (ret)
                        return ret;
        }

        return 0;
}

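/* Select a training pattern on both ends of the link: on the source via
 * the SOR's pattern hook, and on the sink by read-modify-writing the
 * pattern field of DPCD_LC02 (TRAINING_PATTERN_SET) so that the register's
 * other bits, such as scrambling control, are left untouched.
 */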
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
        struct nvkm_dp *dp = lt->dp;
        u8 sink_tp;

        OUTP_TRACE(&dp->outp, "training pattern %d", pattern);
        dp->outp.ior->func->dp.pattern(dp->outp.ior, pattern);

        nvkm_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
        sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
        sink_tp |= pattern;
        nvkm_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
}

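/* Channel equalization phase.  TPS3 is used when the sink advertises it
 * (as HBR2-capable sinks do), otherwise TPS2.  The first pass only samples
 * status, since the drive settings were just programmed during clock
 * recovery; each later pass re-drives the lanes with the sink's newest
 * adjustment requests, waits the 400us default interval, and checks
 * CHANNEL_EQ_DONE, SYMBOL_LOCKED and INTERLANE_ALIGN_DONE, giving up after
 * a handful of tries or if clock recovery is lost along the way.
 */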
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
        bool eq_done = false, cr_done = true;
        int tries = 0, i;

        if (lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
                nvkm_dp_train_pattern(lt, 3);
        else
                nvkm_dp_train_pattern(lt, 2);

        do {
                if ((tries &&
                    nvkm_dp_train_drive(lt, lt->pc2)) ||
                    nvkm_dp_train_sense(lt, lt->pc2, 400))
                        break;

                eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
                for (i = 0; i < lt->dp->outp.ior->dp.nr && eq_done; i++) {
                        u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
                        if (!(lane & DPCD_LS02_LANE0_CR_DONE))
                                cr_done = false;
                        if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
                            !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
                                eq_done = false;
                }
        } while (!eq_done && cr_done && ++tries <= 5);

        return eq_done ? 0 : -1;
}

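/* Clock recovery phase, using TPS1.  Drive settings are refreshed from the
 * sink's requests before each 100us status poll.  The retry counter resets
 * whenever the sink asks for a different voltage swing, mirroring the usual
 * five-tries-per-swing-level rule, and the loop aborts early if CR still
 * fails with MAX_SWING_REACHED set, since no stronger drive is available.
 */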
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
        bool cr_done = false, abort = false;
        int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
        int tries = 0, i;

        nvkm_dp_train_pattern(lt, 1);

        do {
                if (nvkm_dp_train_drive(lt, false) ||
                    nvkm_dp_train_sense(lt, false, 100))
                        break;

                cr_done = true;
                for (i = 0; i < lt->dp->outp.ior->dp.nr; i++) {
                        u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
                        if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
                                cr_done = false;
                                if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
                                        abort = true;
                                break;
                        }
                }

                if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
                        voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
                        tries = 0;
                }
        } while (!cr_done && !abort && ++tries < 5);

        return cr_done ? 0 : -1;
}

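/* Train the link at the rate and lane count currently programmed into the
 * OR.  The sequence: run the VBIOS script matched against the requested
 * bandwidth from the DP info table's link-compare entries, program the
 * source side of the link, power up the lanes, write LINK_BW_SET and
 * LANE_COUNT_SET to the sink, then run clock recovery followed by channel
 * equalization and clear the training pattern.  Note that ior->dp.bw is in
 * units of 270 Mb/s of raw link rate, i.e. 27 MB/s of payload per lane
 * once 8b/10b coding overhead is accounted for, hence the scaling in the
 * debug message and table comparisons below.
 */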
static int
nvkm_dp_train_links(struct nvkm_dp *dp)
{
        struct nvkm_ior *ior = dp->outp.ior;
        struct nvkm_disp *disp = dp->outp.disp;
        struct nvkm_subdev *subdev = &disp->engine.subdev;
        struct nvkm_bios *bios = subdev->device->bios;
        struct lt_state lt = {
                .dp = dp,
        };
        u32 lnkcmp;
        u8 sink[2];
        int ret;

        OUTP_DBG(&dp->outp, "training %d x %d MB/s",
                 ior->dp.nr, ior->dp.bw * 27);

        /* Intersect misc. capabilities of the OR and sink. */
        if (disp->engine.subdev.device->chipset < 0xd0)
                dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
        lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;

        /* Set desired link configuration on the source. */
        if ((lnkcmp = lt.dp->info.lnkcmp)) {
                if (dp->version < 0x30) {
                        while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
                                lnkcmp += 4;
                        lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
                } else {
                        while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
                                lnkcmp += 3;
                        lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
                }

                nvbios_init(subdev, lnkcmp,
                        init.outp = &dp->outp.info;
                        init.or   = ior->id;
                        init.link = ior->asy.link;
                );
        }

        ret = ior->func->dp.links(ior, dp->aux);
        if (ret) {
                if (ret < 0) {
                        OUTP_ERR(&dp->outp, "train failed with %d", ret);
                        return ret;
                }
                return 0;
        }

        ior->func->dp.power(ior, ior->dp.nr);

        /* Set desired link configuration on the sink. */
        sink[0] = ior->dp.bw;
        sink[1] = ior->dp.nr;
        if (ior->dp.ef)
                sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

        ret = nvkm_wraux(dp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
        if (ret)
                return ret;

        /* Attempt to train the link in this configuration. */
        memset(lt.stat, 0x00, sizeof(lt.stat));
        ret = nvkm_dp_train_cr(&lt);
        if (ret == 0)
                ret = nvkm_dp_train_eq(&lt);
        nvkm_dp_train_pattern(&lt, 0);
        return ret;
}

static void
nvkm_dp_train_fini(struct nvkm_dp *dp)
{
        /* Execute AfterLinkTraining script from DP Info table. */
        nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[1],
                init.outp = &dp->outp.info;
                init.or   = dp->outp.ior->id;
                init.link = dp->outp.ior->asy.link;
        );
}

static void
nvkm_dp_train_init(struct nvkm_dp *dp)
{
        /* Execute EnableSpread/DisableSpread script from DP Info table. */
        if (dp->dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
                nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[2],
                        init.outp = &dp->outp.info;
                        init.or   = dp->outp.ior->id;
                        init.link = dp->outp.ior->asy.link;
                );
        } else {
                nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[3],
                        init.outp = &dp->outp.info;
                        init.or   = dp->outp.ior->id;
                        init.link = dp->outp.ior->asy.link;
                );
        }

        /* Execute BeforeLinkTraining script from DP Info table. */
        nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
                init.outp = &dp->outp.info;
                init.or   = dp->outp.ior->id;
                init.link = dp->outp.ior->asy.link;
        );
}

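/* Candidate link configurations, highest capacity first.  The rate column
 * is usable payload bandwidth in KB/s: each bandwidth unit (bw, in 270 Mb/s
 * steps) carries 27000 KB/s per lane after 8b/10b overhead, so
 * rate = bw * 27000 * nr.  For example, the first entry is four lanes of
 * HBR2 (bw 0x14 = 20): 20 * 27000 * 4 = 2160000 KB/s.
 */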
static const struct dp_rates {
        u32 rate;
        u8  bw;
        u8  nr;
} nvkm_dp_rates[] = {
        { 2160000, 0x14, 4 },
        { 1080000, 0x0a, 4 },
        { 1080000, 0x14, 2 },
        {  648000, 0x06, 4 },
        {  540000, 0x0a, 2 },
        {  540000, 0x14, 1 },
        {  324000, 0x06, 2 },
        {  270000, 0x0a, 1 },
        {  162000, 0x06, 1 },
        {}
};

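/* Select and train a link configuration for the given payload bandwidth.
 * A failsafe entry, the lowest configuration the OR (and preferably the
 * sink) supports that still satisfies dataKBps, is chosen first; training
 * then walks the table from the fastest entry down to that failsafe,
 * stopping at the first configuration both ends accept and that trains
 * successfully.  The comment below explains why the driver refuses to fall
 * back any further than the failsafe rate.
 */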
static int
nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
{
        struct nvkm_ior *ior = dp->outp.ior;
        const u8 sink_nr = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
        const u8 sink_bw = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
        const u8 outp_nr = dp->outp.info.dpconf.link_nr;
        const u8 outp_bw = dp->outp.info.dpconf.link_bw;
        const struct dp_rates *failsafe = NULL, *cfg;
        int ret = -EINVAL;
        u8  pwr;

        /* Find the lowest configuration of the OR that can support
         * the required link rate.
         *
         * We will refuse to program the OR to lower rates, even if
         * link training fails at higher rates (or even if the sink
         * can't support the rate at all, though the DD is supposed
         * to prevent such situations from happening).
         *
         * Attempting to do so can cause the entire display to hang,
         * and it's better to have a failed modeset than that.
         */
        for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
                if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
                        /* Try to respect sink limits too when selecting
                         * lowest link configuration.
                         */
                        if (!failsafe ||
                            (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
                                failsafe = cfg;
                }

                if (failsafe && cfg[1].rate < dataKBps)
                        break;
        }

        if (WARN_ON(!failsafe))
                return ret;

        /* Ensure sink is not in a low-power state. */
        if (!nvkm_rdaux(dp->aux, DPCD_SC00, &pwr, 1)) {
                if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
                        pwr &= ~DPCD_SC00_SET_POWER;
                        pwr |=  DPCD_SC00_SET_POWER_D0;
                        nvkm_wraux(dp->aux, DPCD_SC00, &pwr, 1);
                }
        }

        /* Link training. */
        OUTP_DBG(&dp->outp, "training (min: %d x %d MB/s)",
                 failsafe->nr, failsafe->bw * 27);
        nvkm_dp_train_init(dp);
        for (cfg = nvkm_dp_rates; ret < 0 && cfg <= failsafe; cfg++) {
                /* Skip configurations not supported by both OR and sink. */
                if ((cfg->nr > outp_nr || cfg->bw > outp_bw ||
                     cfg->nr > sink_nr || cfg->bw > sink_bw)) {
                        if (cfg != failsafe)
                                continue;
                        OUTP_ERR(&dp->outp, "link rate unsupported by sink");
                }
                ior->dp.mst = dp->lt.mst;
                ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
                ior->dp.bw = cfg->bw;
                ior->dp.nr = cfg->nr;

                /* Program selected link configuration. */
                ret = nvkm_dp_train_links(dp);
        }
        nvkm_dp_train_fini(dp);
        if (ret < 0)
                OUTP_ERR(&dp->outp, "training failed");
        else
                OUTP_DBG(&dp->outp, "training done");
        atomic_set(&dp->lt.done, 1);
        return ret;
}

static void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
        struct nvkm_dp *dp = nvkm_dp(outp);

        /* Execute DisableLT script from DP Info Table. */
        nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
                init.outp = &dp->outp.info;
                init.or   = ior->id;
                init.link = ior->arm.link;
        );
}

static void
nvkm_dp_release(struct nvkm_outp *outp)
{
        struct nvkm_dp *dp = nvkm_dp(outp);

        /* Prevent link from being retrained if sink sends an IRQ. */
        atomic_set(&dp->lt.done, 0);
        dp->outp.ior->dp.nr = 0;
}

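/* Validate the current link against the heads about to use it, retraining
 * if necessary.  Required bandwidth is accumulated per attached head as
 * pixel clock (kHz) times depth in bits, then divided by eight into KB/s;
 * available bandwidth is bw * 27000 * nr KB/s as in the rate table above.
 * As a rough worked example, assuming 24 bits per pixel, a 148500 kHz
 * 1080p60 head needs 148500 * 24 / 8 = 445500 KB/s, which a single HBR
 * lane (270000 KB/s) cannot carry but two lanes (540000 KB/s) can.  If the
 * bandwidth check passes, the DPCD lane status registers are read to
 * confirm the link is still trained before skipping the retrain.
 */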
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
        struct nvkm_dp *dp = nvkm_dp(outp);
        struct nvkm_ior *ior = dp->outp.ior;
        struct nvkm_head *head;
        bool retrain = true;
        u32 datakbps = 0;
        u32 dataKBps;
        u32 linkKBps;
        u8  stat[3];
        int ret, i;

        mutex_lock(&dp->mutex);

        /* Check that link configuration meets current requirements. */
        list_for_each_entry(head, &outp->disp->head, head) {
                if (ior->asy.head & (1 << head->id)) {
                        u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
                        datakbps += khz * head->asy.or.depth;
                }
        }

        linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
        dataKBps = DIV_ROUND_UP(datakbps, 8);
        OUTP_DBG(&dp->outp, "data %d KB/s link %d KB/s mst %d->%d",
                 dataKBps, linkKBps, ior->dp.mst, dp->lt.mst);
        if (linkKBps < dataKBps || ior->dp.mst != dp->lt.mst) {
                OUTP_DBG(&dp->outp, "link requirements changed");
                goto done;
        }

        /* Check that link is still trained. */
        ret = nvkm_rdaux(dp->aux, DPCD_LS02, stat, 3);
        if (ret) {
                OUTP_DBG(&dp->outp,
                         "failed to read link status, assuming no sink");
                goto done;
        }

        if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
                for (i = 0; i < ior->dp.nr; i++) {
                        u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
                        if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
                            !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
                            !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
                                OUTP_DBG(&dp->outp,
                                         "lane %d not equalised", i);
                                goto done;
                        }
                }
                retrain = false;
        } else {
                OUTP_DBG(&dp->outp, "no inter-lane alignment");
        }

done:
        if (retrain || !atomic_read(&dp->lt.done))
                ret = nvkm_dp_train(dp, dataKBps);
        mutex_unlock(&dp->mutex);
        return ret;
}

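/* Power the AUX channel up or down around sink detection.  While a sink is
 * considered present the AUX pad is kept powered ("always", per the debug
 * message), and the receiver capability block is re-read from the start of
 * DPCD; if that read fails, the pad drops back to on-demand powering and
 * the link is marked untrained.
 */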
static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
        struct nvkm_i2c_aux *aux = dp->aux;

        if (enable) {
                if (!dp->present) {
                        OUTP_DBG(&dp->outp, "aux power -> always");
                        nvkm_i2c_aux_monitor(aux, true);
                        dp->present = true;
                }

                if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
                                sizeof(dp->dpcd)))
                        return true;
        }

        if (dp->present) {
                OUTP_DBG(&dp->outp, "aux power -> demand");
                nvkm_i2c_aux_monitor(aux, false);
                dp->present = false;
        }

        atomic_set(&dp->lt.done, 0);
        return false;
}

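/* Hotplug/IRQ notifier bound to the AUX channel.  A sink IRQ on a link
 * that has already been trained triggers re-acquisition (and hence a
 * retrain if the link has dropped), while plug events re-read the DPCD;
 * plug, unplug and IRQ are all forwarded as connector events to anyone
 * listening on the display's HPD event source.
 */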
static int
nvkm_dp_hpd(struct nvkm_notify *notify)
{
        const struct nvkm_i2c_ntfy_rep *line = notify->data;
        struct nvkm_dp *dp = container_of(notify, typeof(*dp), hpd);
        struct nvkm_conn *conn = dp->outp.conn;
        struct nvkm_disp *disp = dp->outp.disp;
        struct nvif_notify_conn_rep_v0 rep = {};

        OUTP_DBG(&dp->outp, "HPD: %d", line->mask);
        if (line->mask & NVKM_I2C_IRQ) {
                if (atomic_read(&dp->lt.done))
                        dp->outp.func->acquire(&dp->outp);
                rep.mask |= NVIF_NOTIFY_CONN_V0_IRQ;
        } else {
                nvkm_dp_enable(dp, true);
        }

        if (line->mask & NVKM_I2C_UNPLUG)
                rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
        if (line->mask & NVKM_I2C_PLUG)
                rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;

        nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
        return NVKM_NOTIFY_KEEP;
}

static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
        struct nvkm_dp *dp = nvkm_dp(outp);
        nvkm_notify_put(&dp->hpd);
        nvkm_dp_enable(dp, false);
}

static void
nvkm_dp_init(struct nvkm_outp *outp)
{
        struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
        struct nvkm_dp *dp = nvkm_dp(outp);

        nvkm_notify_put(&dp->outp.conn->hpd);

        /* eDP panels need powering on by us (if the VBIOS doesn't default it
         * to on) before doing any AUX channel transactions.  LVDS panel power
         * is handled by the SOR itself, and not required for LVDS DDC.
         */
        if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
                int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
                if (power == 0)
                        nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);

                /* We delay here unconditionally, even if already powered,
                 * because some laptop panels have a significant resume
                 * delay before they begin responding.
                 *
                 * This is likely a bit of a hack, but there's no better
                 * idea for handling this at the moment.
                 */
                msleep(300);

                /* If the eDP panel can't be detected, we need to restore
                 * the panel power GPIO to avoid breaking another output.
                 */
                if (!nvkm_dp_enable(dp, true) && power == 0)
                        nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
        } else {
                nvkm_dp_enable(dp, true);
        }

        nvkm_notify_get(&dp->hpd);
}

static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
        struct nvkm_dp *dp = nvkm_dp(outp);
        nvkm_notify_fini(&dp->hpd);
        return dp;
}

static const struct nvkm_outp_func
nvkm_dp_func = {
        .dtor = nvkm_dp_dtor,
        .init = nvkm_dp_init,
        .fini = nvkm_dp_fini,
        .acquire = nvkm_dp_acquire,
        .release = nvkm_dp_release,
        .disable = nvkm_dp_disable,
};

static int
nvkm_dp_ctor(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
             struct nvkm_i2c_aux *aux, struct nvkm_dp *dp)
{
        struct nvkm_device *device = disp->engine.subdev.device;
        struct nvkm_bios *bios = device->bios;
        struct nvkm_i2c *i2c = device->i2c;
        u8  hdr, cnt, len;
        u32 data;
        int ret;

        ret = nvkm_outp_ctor(&nvkm_dp_func, disp, index, dcbE, &dp->outp);
        if (ret)
                return ret;

        dp->aux = aux;
        if (!dp->aux) {
                OUTP_ERR(&dp->outp, "no aux");
                return -EINVAL;
        }

        /* bios data is not optional */
        data = nvbios_dpout_match(bios, dp->outp.info.hasht,
                                  dp->outp.info.hashm, &dp->version,
                                  &hdr, &cnt, &len, &dp->info);
        if (!data) {
                OUTP_ERR(&dp->outp, "no bios dp data");
                return -EINVAL;
        }

        OUTP_DBG(&dp->outp, "bios dp %02x %02x %02x %02x",
                 dp->version, hdr, cnt, len);

        /* hotplug detect, replaces gpio-based mechanism with aux events */
        ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true,
                               &(struct nvkm_i2c_ntfy_req) {
                                .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG |
                                        NVKM_I2C_IRQ,
                                .port = dp->aux->id,
                               },
                               sizeof(struct nvkm_i2c_ntfy_req),
                               sizeof(struct nvkm_i2c_ntfy_rep),
                               &dp->hpd);
        if (ret) {
                OUTP_ERR(&dp->outp, "error monitoring aux hpd: %d", ret);
                return ret;
        }

        mutex_init(&dp->mutex);
        atomic_set(&dp->lt.done, 0);
        return 0;
}

int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
            struct nvkm_outp **poutp)
{
        struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
        struct nvkm_i2c_aux *aux;
        struct nvkm_dp *dp;

        if (dcbE->location == 0)
                aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
        else
                aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));

        if (!(dp = kzalloc(sizeof(*dp), GFP_KERNEL)))
                return -ENOMEM;
        *poutp = &dp->outp;

        return nvkm_dp_ctor(disp, index, dcbE, aux, dp);
}
