ValueError: 18 columns passed, passed data had 1 columns

    #pip install sqlalchemy 
    from Google import Create_Service # link to source code is in the description
    import pyodbc as odbc # pip install pyodbc
    import pandas as pd
    import sqlalchemy
    import sqlite3
    
    """
    Step 1.1 Connect to MS SQL Server Database System
    """
    server = '192.168.102.4'
    database = ''
    username = ''
    password = ''
    driver = 'ODBC Driver 17 for SQL Server'
    def connection_string(driver , server, database):
         
        conn_string = f"""
            DRIVER={{{driver}}};
            SERVER={server};
            DATABASE={database};
            uid={username};
            pwd={password};
           
            Trust_Connection=yes;        
        """
        return conn_string

    try:
        conn = odbc.connect(connection_string(driver, server, database))
        print('Connection Created')
    except odbc.DatabaseError as e:
        print('Database Error:')
        print(e)
    except odbc.Error as e:
        print('Connection Error:')
        print(e)
    else:

        sql_query = """
            SELECT TOP 1
            NOME = B.A1_NOME, CPF = B.A1_CGC,
            'E-MAIL' = CASE WHEN LEN(ISNULL(CC.CLIEMAIL, '')) > 5 THEN CC.CLIEMAIL
            WHEN LEN(ISNULL(DD.CLIEXTEMAIL2, '')) > 5 THEN DD.CLIEXTEMAIL2
            ELSE B.A1_EMAIL COLLATE Latin1_General_BIN END,
            DDD = CASE WHEN LEN(ISNULL(CC.CLIDDDCELULAR, '')) > 0 THEN CC.CLIDDDCELULAR
            WHEN LEN(ISNULL(DD.CLIEXTDDDCELULAR2, '')) > 0 THEN DD.CLIEXTDDDCELULAR2
            ELSE B.A1_DDD COLLATE Latin1_General_BIN END,
            CELULAR = CASE WHEN LEN(ISNULL(CC.CLICELULAR, '')) > 5 THEN CC.CLICELULAR
            WHEN LEN(ISNULL(DD.CLIEXTCELULAR2, '')) > 5 THEN DD.CLIEXTCELULAR2
            ELSE B.A1_TEL COLLATE Latin1_General_BIN END,
            DATACADASTRO = CONVERT(VARCHAR,DATEADD(DAY,((ASCII(SUBSTRING(A1_USERLGI,12,1))-50)*100+(ASCII(SUBSTRING(A1_USERLGI,16,1))-50)),'19960101'),112),
            ANIVERSARIO = CASE WHEN LEN(B.A1_DTNASC) > 5 THEN B.A1_DTNASC
            ELSE CONVERT(VARCHAR(10), CC.CLIDTANASCIMENTO, 112) END,
            ENDERECO = B.A1_END,
            DOCUMENTO = A.L1_DOC,
            CODIGOPRODUTO = E.L2_PRODUTO,
            QUANTIDADE = E.L2_QUANT,
            VALORUNITARIO = E.L2_VRUNIT,
            VALORPEDIDO = E.L2_VLRITEM,
            DATAPEDIDO = A.L1_DTLIM,
            LOJA = A.L1_FILIAL,
            CODVENDEDOR = A.L1_VEND, VENDEDOR = D.A3_NOME,
            PDV = A.L1_PDV
            FROM
            [192.168.102.6].DBTOTVS12.dbo.SL1010 A, [192.168.102.6].DBTOTVS12.dbo.SA3010 D,
            [192.168.102.6].DBTOTVS12.dbo.SL2010 E,
            [192.168.102.6].DBTOTVS12.dbo.SA1010 B
            LEFT OUTER JOIN CLIENTES CC ON LEN(LTRIM(RTRIM(B.A1_CGC))) > 1 AND CONVERT(DECIMAL(14, 0), LTRIM(RTRIM(B.A1_CGC))) = CC.CLICPFCNPJ
            LEFT OUTER JOIN CLIENTESEXTENSAO DD ON CC.CLICODIGO = DD.CLICODIGO
            WHERE
            A.L1_CLIENTE = B.A1_COD
            AND A.L1_CLIENTE <> '000000001'
            AND A.L1_DTLIM >= '20210101'
            AND A.L1_SITUA = 'OK'
            AND A.L1_FILIAL = E.L2_FILIAL
            AND A.L1_NUM = E.L2_NUM
            AND A.L1_PDV = E.L2_PDV
            AND A.L1_DOC = E.L2_DOC
            AND E.L2_VEND = D.A3_COD
            AND E.L2_FILIAL = D.A3_FILIAL
            AND A.D_E_L_E_T_ = ''
            AND B.D_E_L_E_T_ = ''
            AND D.D_E_L_E_T_ = ''
            AND E.D_E_L_E_T_ = ''
            ORDER BY L1_DTLIM
        """

    
        cursor = conn.cursor()
        cursor.execute(sql_query)

    """
    Step 1.2 Retrieve Dataset from SQL Server
    """
    recordset = cursor.fetchall()
    #print(recordset)
    columns = [col[0] for col in cursor.description]
    #df = pd.read_sql(sql_query,conn )
    #print(df.head(1000))
    #df = pd.DataFrame([data],columns=['Nome','CPF','Email','DDD','Celular','Data de Cadastro','Aniversário','Endereço','Nº do pedido','Código de produto','Quantidade de produtos','Valor Unitário','Valor do pedido','Data do pedido','Loja do pedido','cod vendedor','Vendedor responsável','PDV'])
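        # NOTE: cursor.fetchall() returns pyodbc Row objects rather than plain tuples;
        # if pandas complains about the shape here, converting each row with list(row)
        # before building the DataFrame is a common workaround (see the answer below).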
        df = pd.DataFrame(recordset, columns=columns)
        # df = df.transpose()

        # if 'published_date' in df.columns:
        #     df['published_date'] = df['published_date'].dt.strftime('%Y-%m-%d %H:%M:%S')
        # recordset = df.values.tolist()


    """
    Step 2. Export Dataset to Google Spreadsheets
    """
    gs_sheet_id = '1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk'
    tab_id = 0
    
    CLIENT_SECRET_FILE = 'yuryrogens.json'
    API_NAME = 'sheets'
    API_VERSION = 'v4'
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']

    service = (CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)

    # create spreadsheets reference object
    mySpreadsheets = service.spreadsheets().get(
        spreadsheetId='1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk'
    ).execute()

    recordset

    tab_name = [sheet['properties']['title'] for sheet in mySpreadsheets['sheets'] if sheet['properties']['sheetId'] == tab_id][0]
    
    """
    Insert dataset
    """
    def construct_request_body(value_array, dimension: str='ROWS') -> dict:
        try:
            request_body = {
                'majorDimension': dimension,
                'values': value_array
            }
            return request_body
        except Exception as e:
            print(e)
            return {}

    """
    Insert column names
    """
    request_body_columns = construct_request_body([columns])
    service.spreadsheets().values().update(
        spreadsheetId='1nFC9Q9TqdatLrDSA48uW2dqQuYT7YALXWjd0vmGZuqk',
        valueInputOption='USER_ENTERED',
        range='!A1',
        body=request_body_columns
    ).execute()

    """
    Insert rows
    """
        request_body_values = construct_request_body(recordset)
        service.spreadsheets().values().update(
            spreadsheetId=gs_sheet_id,
            valueInputOption='USER_ENTERED',
            range=f'{tab_name}!A2',
            body=request_body_values
        ).execute()

        print('Task is complete')

        cursor.close()
        conn.close()

When running the query and trying to send the result to Google Sheets I'm having this problem. Using

    df = pd.DataFrame([recordset], columns=columns)

gives the error

    ValueError: 18 columns passed, passed data had 1 columns

and if I use

    df = pd.DataFrame(recordset, columns=columns)

I get the following error:

    ValueError: Shape of passed values is (1, 1), indices imply (1, 18)
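
For reference, the shape mismatch can be reproduced without a database. In this minimal sketch (dummy rows stand in for the pyodbc result set, with only 3 of the 18 columns), wrapping the whole result set in an extra list hands pandas a single column, while converting each row to a plain list before building the DataFrame is a common way to avoid both errors:

    import pandas as pd

    columns = ['NOME', 'CPF', 'CELULAR']                # 3 of the 18 columns, for brevity
    recordset = [('MARIA', '12345678900', '99999999')]  # stand-in for cursor.fetchall()

    # pd.DataFrame([recordset], columns=columns)
    # -> ValueError: 3 columns passed, passed data had 1 columns

    rows = [list(row) for row in recordset]             # convert each row to a plain list
    df = pd.DataFrame(rows, columns=columns)            # shape (1, 3), as expected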


Solution 1:[1]

Could you try something like this?

    records = cursor.fetchall()

    record_list = []
    for record in records:
        record_list.append(str(record[0]))

From my experience with pyodbc, this has given me something I can work with.

We can then

    pd.DataFrame(record_list)

and manipulate the resulting data as needed.
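
Putting the pieces together, a runnable sketch of this approach might look like the following. It reuses connection_string, driver, server, database and sql_query from the question; keeping all 18 columns (instead of only record[0]) and taking the header names from cursor.description is an extension of the idea above, not part of the original answer.

    import pandas as pd
    import pyodbc as odbc

    conn = odbc.connect(connection_string(driver, server, database))
    cursor = conn.cursor()
    cursor.execute(sql_query)

    records = cursor.fetchall()
    columns = [col[0] for col in cursor.description]

    # pyodbc returns Row objects; converting each one to a plain list gives
    # pandas (and later the Sheets API) data it can serialize
    record_list = [list(record) for record in records]

    df = pd.DataFrame(record_list, columns=columns)
    print(df.head())

    cursor.close()
    conn.close()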

Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source
Solution 1: Daniel Weigel